
drm/xe: Call __guc_exec_queue_fini_async direct for KERNEL exec_queues

Usually we call __guc_exec_queue_fini_async via a worker because an
exec_queue fini can be triggered from within the GPU scheduler, which
would create a circular dependency without the worker. Kernel
exec_queues are fini'd at driver unload (not from within the GPU
scheduler), so it is safe to call __guc_exec_queue_fini_async directly.

Suggested-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Author:    Matthew Brost <matthew.brost@intel.com>
Date:      2023-08-11 06:27:34 -07:00
Committer: Rodrigo Vivi <rodrigo.vivi@intel.com>
Parent:    ca8656a2eb
Commit:    a20c75dba1

@@ -956,27 +956,19 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
 	xe_sched_entity_fini(&ge->entity);
 	xe_sched_fini(&ge->sched);
 
-	if (!(q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
-		kfree(ge);
-		xe_exec_queue_fini(q);
-	}
+	kfree(ge);
+	xe_exec_queue_fini(q);
 }
 
 static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
 {
-	bool kernel = q->flags & EXEC_QUEUE_FLAG_KERNEL;
-
 	INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
-	queue_work(system_wq, &q->guc->fini_async);
 
 	/* We must block on kernel engines so slabs are empty on driver unload */
-	if (kernel) {
-		struct xe_guc_exec_queue *ge = q->guc;
-
-		flush_work(&ge->fini_async);
-		kfree(ge);
-		xe_exec_queue_fini(q);
-	}
+	if (q->flags & EXEC_QUEUE_FLAG_KERNEL)
+		__guc_exec_queue_fini_async(&q->guc->fini_async);
+	else
+		queue_work(system_wq, &q->guc->fini_async);
 }
 
 static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
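For context, below is a minimal, self-contained sketch of the
deferred-fini idiom the hunk above adjusts. The struct, flag, and
function names (toy_queue, TOY_QUEUE_FLAG_KERNEL, etc.) are hypothetical
and do not exist in the xe driver; only the workqueue calls (INIT_WORK,
queue_work, system_wq, container_of) are the standard kernel API.
Teardown is normally bounced to a worker because it can be triggered
from inside the GPU scheduler, where tearing the scheduler down in
place would be a circular dependency; a caller known to run outside the
scheduler, such as driver unload, can run the teardown body
synchronously instead.

#include <linux/container_of.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#define TOY_QUEUE_FLAG_KERNEL	(1 << 0)

struct toy_queue {
	struct work_struct fini_async;
	unsigned long flags;
};

/*
 * Teardown body: frees the queue, so it must not be reached from
 * inside the scheduler that is being torn down. (Hypothetical sketch.)
 */
static void __toy_queue_fini_async(struct work_struct *w)
{
	struct toy_queue *q = container_of(w, struct toy_queue, fini_async);

	/* ... fini scheduler entities here ... */
	kfree(q);
}

static void toy_queue_fini_async(struct toy_queue *q)
{
	INIT_WORK(&q->fini_async, __toy_queue_fini_async);

	if (q->flags & TOY_QUEUE_FLAG_KERNEL)
		/* Driver unload: not called from the scheduler, so it is
		 * safe to run synchronously -- no flush_work() needed. */
		__toy_queue_fini_async(&q->fini_async);
	else
		/* May be called from the scheduler: defer to a worker to
		 * break the circular dependency. */
		queue_work(system_wq, &q->fini_async);
}

Running the body synchronously on the unload path is also what lets the
patch drop the flush_work()/manual-free special case: both paths now
end in the same single cleanup function, which keeps the slabs empty at
driver unload without blocking on a worker.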