
drm/i915: Release i915_gem_context from a worker

The only reason for this really is the i915_gem_engines->fence
callback engines_notify(), which exists purely as a fairly funky
reference counting scheme for that. Otherwise all other callers are
from process context, and generally in a fairly benign locking context.

Unfortunately untangling that requires some major surgery, and we have
a few i915_gem_context reference counting bugs that need fixing, and
they blow up in the current hardirq calling context, so we need a
stop-gap measure.

Put a FIXME comment in when this should be removable again.

v2: Fix mock_context(), noticed by intel-gfx-ci.

Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20210902142057.929669-1-daniel.vetter@ffwll.ch
Daniel Vetter 2021-09-02 16:20:47 +02:00
parent fb1e95bc27
commit 75eefd8258
3 changed files with 24 additions and 2 deletions
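
To illustrate the pattern the commit message describes, here is a minimal sketch of deferring the final kref release to a workqueue so that the last put becomes safe from hardirq context. The names (my_ctx, my_ctx_release_work(), my_ctx_put()) and the use of system_wq are illustrative assumptions, not the i915 code; the actual diff below queues onto the driver's own workqueue instead.

/* Minimal sketch (not the i915 code): defer the final kref release to a
 * workqueue so the last reference may be dropped from hardirq or other
 * atomic context without sleeping.
 */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct kref ref;
	struct work_struct release_work;
};

static void my_ctx_release_work(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, typeof(*ctx), release_work);

	/* Runs in process context: cleanup here may sleep, take mutexes, etc. */
	kfree(ctx);
}

static void my_ctx_release(struct kref *ref)
{
	struct my_ctx *ctx = container_of(ref, typeof(*ctx), ref);

	/* May be called from hardirq context: only punt to a worker here. */
	queue_work(system_wq, &ctx->release_work);
}

static void my_ctx_put(struct my_ctx *ctx)
{
	kref_put(&ctx->ref, my_ctx_release);
}

static struct my_ctx *my_ctx_create(void)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;

	kref_init(&ctx->ref);
	INIT_WORK(&ctx->release_work, my_ctx_release_work);
	return ctx;
}

The trade-off is that the actual teardown now happens asynchronously, which is why the commit message treats this as a stop-gap until the hardirq caller is untangled.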


@@ -986,9 +986,10 @@ free_engines:
 	return err;
 }
 
-void i915_gem_context_release(struct kref *ref)
+static void i915_gem_context_release_work(struct work_struct *work)
 {
-	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
+						    release_work);
 
 	trace_i915_context_free(ctx);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
@@ -1002,6 +1003,13 @@ void i915_gem_context_release(struct kref *ref)
 	kfree_rcu(ctx, rcu);
 }
 
+void i915_gem_context_release(struct kref *ref)
+{
+	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+
+	queue_work(ctx->i915->wq, &ctx->release_work);
+}
+
 static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
 {
@@ -1303,6 +1311,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
 	ctx->sched = pc->sched;
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->link);
+	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
 
 	spin_lock_init(&ctx->stale.lock);
 	INIT_LIST_HEAD(&ctx->stale.engines);


@@ -288,6 +288,18 @@ struct i915_gem_context {
 	 */
 	struct kref ref;
 
+	/**
+	 * @release_work:
+	 *
+	 * Work item for deferred cleanup, since i915_gem_context_put() tends to
+	 * be called from hardirq context.
+	 *
+	 * FIXME: The only real reason for this is &i915_gem_engines.fence, all
+	 * other callers are from process context and need at most some mild
+	 * shuffling to pull the i915_gem_context_put() call out of a spinlock.
+	 */
+	struct work_struct release_work;
+
 	/**
 	 * @rcu: rcu_head for deferred freeing.
 	 */

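The &i915_gem_engines.fence caller mentioned in the FIXME above is a dma_fence callback, and fence callbacks can run from the interrupt-driven signalling path. Below is a simplified, hypothetical sketch of such a caller (not the actual engines_notify() implementation), reusing my_ctx_put() from the earlier sketch; it is only safe because the final free has been moved to a worker.

/* Hypothetical sketch (not the actual engines_notify()): a dma_fence
 * callback may run from hardirq context when the fence signals, so the
 * reference drop done here must not sleep -- which is exactly what the
 * deferred release guarantees.
 */
#include <linux/dma-fence.h>

struct my_ctx;				/* from the earlier sketch */
void my_ctx_put(struct my_ctx *ctx);	/* defers the free to a worker */

struct my_engines {
	struct my_ctx *ctx;
	struct dma_fence_cb cb;
};

static void my_engines_notify(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_engines *engines = container_of(cb, typeof(*engines), cb);

	my_ctx_put(engines->ctx);	/* safe: only queues the release work */
}

/* Registration would look something like:
 *
 *	dma_fence_add_callback(fence, &engines->cb, my_engines_notify);
 */
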

@@ -23,6 +23,7 @@ mock_context(struct drm_i915_private *i915,
 	kref_init(&ctx->ref);
 	INIT_LIST_HEAD(&ctx->link);
 	ctx->i915 = i915;
+	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
 
 	mutex_init(&ctx->mutex);