drm/ttm: remove ttm_bo_(un)lock_delayed_workqueue
Those functions never worked correctly since it is still perfectly possible that a buffer object is released and the background worker restarted even after calling them.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221125102137.1801-2-christian.koenig@amd.com
commit cd3a8a5962 (parent 3ea44105bd)
6 changed files with 3 additions and 46 deletions
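For reference, the removed implementation (shown in full in the drivers/gpu/drm/ttm/ttm_bo.c hunk below) makes the problem easy to see: the "lock" is nothing more than a cancel_delayed_work_sync(), which waits out whichever instance of the work happens to be queued at that moment but does nothing to stop it from being scheduled again right afterwards, for example when a still-busy buffer object is released and goes through the delayed-destroy path. The comments here are added for illustration only:

int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
{
        /* Only cancels/waits for the instance queued right now; nothing
         * stops the work from being scheduled again as soon as this
         * returns. */
        return cancel_delayed_work_sync(&bdev->wq);
}

void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
{
        /* Re-arms the work only if the cancel above found it queued; it may
         * well already be running again by this point. */
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}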
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1717,7 +1717,7 @@ no_preempt:
 
 static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 {
-        int r, resched, length;
+        int r, length;
         struct amdgpu_ring *ring;
         struct dma_fence **fences = NULL;
         struct amdgpu_device *adev = (struct amdgpu_device *)data;
@@ -1747,8 +1747,6 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
         /* stop the scheduler */
         kthread_park(ring->sched.thread);
 
-        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-
         /* preempt the IB */
         r = amdgpu_ring_preempt_ib(ring);
         if (r) {
@@ -1785,8 +1783,6 @@ failure:
 
         up_read(&adev->reset_domain->sem);
 
-        ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-
 pro_end:
         kfree(fences);
 
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3983,10 +3983,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
         }
         amdgpu_fence_driver_hw_fini(adev);
 
-        if (adev->mman.initialized) {
+        if (adev->mman.initialized)
                 flush_delayed_work(&adev->mman.bdev.wq);
-                ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-        }
 
         if (adev->pm_sysfs_en)
                 amdgpu_pm_sysfs_fini(adev);
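The amdgpu_device_fini_hw() hunk above also shows what replaces the helpers where a driver actually cares about the delayed work at teardown: flushing it. A minimal sketch of that pattern, assuming only a ttm_device whose delayed work may still be pending (the function name is illustrative, not from the patch):

#include <linux/workqueue.h>
#include <drm/ttm/ttm_device.h>

static void example_teardown_flush(struct ttm_device *bdev)
{
        /* Wait for a pending/running instance of the delayed destroy work
         * to finish ... */
        flush_delayed_work(&bdev->wq);
        /* ... accepting that later buffer releases may queue it again;
         * there is no way to "lock" it out. */
}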
drivers/gpu/drm/radeon/radeon_device.c
@@ -1772,7 +1772,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
         bool saved = false;
 
         int i, r;
-        int resched;
 
         down_write(&rdev->exclusive_lock);
 
@@ -1784,8 +1783,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
         atomic_inc(&rdev->gpu_reset_counter);
 
         radeon_save_bios_scratch_regs(rdev);
-        /* block TTM */
-        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
         radeon_suspend(rdev);
         radeon_hpd_fini(rdev);
 
@@ -1844,8 +1841,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
         /* reset hpd state */
         radeon_hpd_init(rdev);
 
-        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-
         rdev->in_reset = true;
         rdev->needs_reset = false;
 
drivers/gpu/drm/radeon/radeon_pm.c
@@ -1853,11 +1853,10 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
 static void radeon_dynpm_idle_work_handler(struct work_struct *work)
 {
         struct radeon_device *rdev;
-        int resched;
+
         rdev = container_of(work, struct radeon_device,
                                 pm.dynpm_idle_work.work);
 
-        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
         mutex_lock(&rdev->pm.mutex);
         if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
                 int not_processed = 0;
@@ -1908,7 +1907,6 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
                                       msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
         }
         mutex_unlock(&rdev->pm.mutex);
-        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 }
 
 /*
drivers/gpu/drm/ttm/ttm_bo.c
@@ -418,20 +418,6 @@ void ttm_bo_put(struct ttm_buffer_object *bo)
 }
 EXPORT_SYMBOL(ttm_bo_put);
 
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
-{
-        return cancel_delayed_work_sync(&bdev->wq);
-}
-EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
-
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
-{
-        if (resched)
-                schedule_delayed_work(&bdev->wq,
-                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
-}
-EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
-
 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
                                      struct ttm_resource **mem,
                                      struct ttm_operation_ctx *ctx,
include/drm/ttm/ttm_bo_api.h
@@ -290,22 +290,6 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
 void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
                           struct ttm_lru_bulk_move *bulk);
 
-/**
- * ttm_bo_lock_delayed_workqueue
- *
- * Prevent the delayed workqueue from running.
- * Returns
- * True if the workqueue was queued at the time
- */
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
-
-/**
- * ttm_bo_unlock_delayed_workqueue
- *
- * Allows the delayed workqueue to run.
- */
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
-
 /**
  * ttm_bo_eviction_valuable
  *