1
0
Fork 0
mirror of synced 2025-03-06 20:59:54 +01:00

drm/xe: Add child contexts to the GuC context lookup

The CAT_ERROR message from the GuC provides the guc id of the context
that caused the problem, which can be a child context. We therefore
need to be able to match that id to the exec_queue that owns it, which
we do by adding child contexts to the context lookup.

While at it, fix the error path of the guc id allocation code to
correctly free the ids allocated for parallel queues.

v2: rebase on s/XE_WARN_ON/xe_assert

Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/590
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
This commit is contained in:
Daniele Ceraolo Spurio 2023-09-14 14:48:02 -07:00 committed by Rodrigo Vivi
parent 0d0534750f
commit cb90d46918

View file

@ -247,10 +247,28 @@ int xe_guc_submit_init(struct xe_guc *guc)
return 0;
}
/*
 * Unlocked helper: remove a queue's guc id(s) from the GuC context lookup
 * and return the id range to the allocator it came from.
 *
 * @xa_count: number of lookup entries to erase, starting at q->guc->id.
 * Callers pass q->width here (one entry per parent/child context); passing
 * the partial count allows the allocation error path to unwind only the
 * entries it actually stored.
 *
 * Caller must hold guc->submission_state.lock.
 */
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
	int i;

	lockdep_assert_held(&guc->submission_state.lock);

	/* Drop every parent/child id -> exec_queue mapping that was stored. */
	for (i = 0; i < xa_count; ++i)
		xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);

	/*
	 * Parallel queues carve a contiguous region out of the MLRC bitmap
	 * (ids offset by GUC_ID_START_MLRC); everything else uses the IDA.
	 */
	if (xe_exec_queue_is_parallel(q))
		bitmap_release_region(guc->submission_state.guc_ids_bitmap,
				      q->guc->id - GUC_ID_START_MLRC,
				      order_base_2(q->width));
	else
		ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
}
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
int ret;
void *ptr;
int i;
/*
* Must use GFP_NOWAIT as this lock is in the dma fence signalling path,
@ -277,30 +295,27 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
if (xe_exec_queue_is_parallel(q))
q->guc->id += GUC_ID_START_MLRC;
ptr = xa_store(&guc->submission_state.exec_queue_lookup,
q->guc->id, q, GFP_NOWAIT);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
goto err_release;
for (i = 0; i < q->width; ++i) {
ptr = xa_store(&guc->submission_state.exec_queue_lookup,
q->guc->id + i, q, GFP_NOWAIT);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
goto err_release;
}
}
return 0;
err_release:
ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
__release_guc_id(guc, q, i);
return ret;
}
/*
 * Release all guc ids owned by @q (lookup entries plus the allocated id
 * range), taking the submission_state lock around the unlocked helper.
 */
static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
	mutex_lock(&guc->submission_state.lock);
	/* q->width lookup entries were stored at allocation time. */
	__release_guc_id(guc, q, q->width);
	mutex_unlock(&guc->submission_state.lock);
}
@ -1489,7 +1504,8 @@ g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
return NULL;
}
xe_assert(xe, q->guc->id == guc_id);
xe_assert(xe, guc_id >= q->guc->id);
xe_assert(xe, guc_id < (q->guc->id + q->width));
return q;
}