drm/i915/gt: Make deferred context allocation explicit
Refactor the backends to handle the deferred context allocation in a consistent manner, and allow calling it as an explicit first step in pinning a context for the first time. This should make it easier for backends to keep track of partially constructed contexts from initialisation.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190809182518.20486-2-chris@chris-wilson.co.uk
parent 72e2777593
commit 4c60b1aaa2
5 changed files with 55 additions and 19 deletions
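For orientation, the heart of the change is in __intel_context_do_pin(): a backend's one-time ->alloc() hook now runs on first pin, latched by the new CONTEXT_ALLOC_BIT flag. Below is a condensed sketch of that flow, paraphrased from the intel_context.c hunk that follows; the wakeref handling and the unwind path are elided, and first_pin_sketch is a hypothetical name used only for illustration.

/*
 * Sketch only: mirrors the first-pin path added to
 * __intel_context_do_pin(). CONTEXT_ALLOC_BIT ensures that
 * ce->ops->alloc() runs at most once over the context's lifetime.
 */
static int first_pin_sketch(struct intel_context *ce)
{
	int err;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = ce->ops->alloc(ce);	/* deferred, one-time setup */
		if (unlikely(err))
			return err;

		__set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

	return ce->ops->pin(ce);		/* per-pin work only */
}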
drivers/gpu/drm/i915/gt/intel_context.c
@@ -53,6 +53,14 @@ int __intel_context_do_pin(struct intel_context *ce)
 	if (likely(!atomic_read(&ce->pin_count))) {
 		intel_wakeref_t wakeref;
 
+		if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+			err = ce->ops->alloc(ce);
+			if (unlikely(err))
+				goto err;
+
+			__set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+		}
+
 		err = 0;
 		with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
 			err = ce->ops->pin(ce);
drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -23,6 +23,8 @@ struct intel_context;
 struct intel_ring;
 
 struct intel_context_ops {
+	int (*alloc)(struct intel_context *ce);
+
 	int (*pin)(struct intel_context *ce);
 	void (*unpin)(struct intel_context *ce);
 
@@ -52,6 +54,9 @@ struct intel_context {
 	struct i915_vma *state;
 	struct intel_ring *ring;
 
+	unsigned long flags;
+#define CONTEXT_ALLOC_BIT 0
+
 	u32 *lrc_reg_state;
 	u64 lrc_desc;
drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -219,8 +219,9 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
 	return container_of(engine, struct virtual_engine, base);
 }
 
-static int execlists_context_deferred_alloc(struct intel_context *ce,
-					    struct intel_engine_cs *engine);
+static int __execlists_context_alloc(struct intel_context *ce,
+				     struct intel_engine_cs *engine);
+
 static void execlists_init_reg_state(u32 *reg_state,
 				     struct intel_context *ce,
 				     struct intel_engine_cs *engine,
@@ -1614,9 +1615,6 @@ __execlists_context_pin(struct intel_context *ce,
 	void *vaddr;
 	int ret;
 
-	ret = execlists_context_deferred_alloc(ce, engine);
-	if (ret)
-		goto err;
 	GEM_BUG_ON(!ce->state);
 
 	ret = intel_context_active_acquire(ce);
@@ -1655,6 +1653,11 @@ static int execlists_context_pin(struct intel_context *ce)
 	return __execlists_context_pin(ce, ce->engine);
 }
 
+static int execlists_context_alloc(struct intel_context *ce)
+{
+	return __execlists_context_alloc(ce, ce->engine);
+}
+
 static void execlists_context_reset(struct intel_context *ce)
 {
 	/*
@@ -1678,6 +1681,8 @@ static void execlists_context_reset(struct intel_context *ce)
 }
 
 static const struct intel_context_ops execlists_context_ops = {
+	.alloc = execlists_context_alloc,
+
 	.pin = execlists_context_pin,
 	.unpin = execlists_context_unpin,
 
@@ -3075,8 +3080,8 @@ get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt)
 		return intel_timeline_create(gt, NULL);
 }
 
-static int execlists_context_deferred_alloc(struct intel_context *ce,
-					    struct intel_engine_cs *engine)
+static int __execlists_context_alloc(struct intel_context *ce,
+				     struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
 	struct i915_vma *vma;
@@ -3085,9 +3090,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
 	struct intel_timeline *timeline;
 	int ret;
 
-	if (ce->state)
-		return 0;
-
+	GEM_BUG_ON(ce->state);
 	context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
 
 	/*
@@ -3533,6 +3536,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 
 	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
 
+	err = __execlists_context_alloc(&ve->context, siblings[0]);
+	if (err)
+		goto err_put;
+
+	__set_bit(CONTEXT_ALLOC_BIT, &ve->context.flags);
+
 	return &ve->context;
 
 err_put:
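Two points worth noting in the execlists hunks above: the renamed __execlists_context_alloc() drops its "deferred" self-check (the early return when ce->state was already set becomes GEM_BUG_ON(ce->state)) because CONTEXT_ALLOC_BIT now guarantees a single invocation; and the virtual engine allocates eagerly at creation time from siblings[0], pre-setting CONTEXT_ALLOC_BIT so that the deferred path in __intel_context_do_pin() is skipped.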
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1480,16 +1480,16 @@ err_obj:
 	return ERR_PTR(err);
 }
 
-static int ring_context_pin(struct intel_context *ce)
+static int ring_context_alloc(struct intel_context *ce)
 {
 	struct intel_engine_cs *engine = ce->engine;
-	int err;
 
 	/* One ringbuffer to rule them all */
 	GEM_BUG_ON(!engine->buffer);
 	ce->ring = engine->buffer;
 
-	if (!ce->state && engine->context_size) {
+	GEM_BUG_ON(ce->state);
+	if (engine->context_size) {
 		struct i915_vma *vma;
 
 		vma = alloc_context_vma(engine);
@@ -1499,6 +1499,13 @@ static int ring_context_pin(struct intel_context *ce)
 		ce->state = vma;
 	}
 
+	return 0;
+}
+
+static int ring_context_pin(struct intel_context *ce)
+{
+	int err;
+
 	err = intel_context_active_acquire(ce);
 	if (err)
 		return err;
@@ -1520,6 +1527,8 @@ static void ring_context_reset(struct intel_context *ce)
 }
 
 static const struct intel_context_ops ring_context_ops = {
+	.alloc = ring_context_alloc,
+
 	.pin = ring_context_pin,
 	.unpin = ring_context_unpin,
 
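The ring submission backend above follows the same split: the one-time work (adopting the engine's shared ringbuffer, allocating the context state vma) moves from ring_context_pin() into the new ring_context_alloc(), leaving pin with only the per-pin bookkeeping. The mock engine below gets the identical treatment for its ring allocation.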
drivers/gpu/drm/i915/gt/mock_engine.c
@@ -147,16 +147,19 @@ static void mock_context_destroy(struct kref *ref)
 	intel_context_free(ce);
 }
 
+static int mock_context_alloc(struct intel_context *ce)
+{
+	ce->ring = mock_ring(ce->engine);
+	if (!ce->ring)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int mock_context_pin(struct intel_context *ce)
 {
 	int ret;
 
-	if (!ce->ring) {
-		ce->ring = mock_ring(ce->engine);
-		if (!ce->ring)
-			return -ENOMEM;
-	}
-
 	ret = intel_context_active_acquire(ce);
 	if (ret)
 		return ret;
@@ -166,6 +169,8 @@ static int mock_context_pin(struct intel_context *ce)
 }
 
 static const struct intel_context_ops mock_context_ops = {
+	.alloc = mock_context_alloc,
+
 	.pin = mock_context_pin,
 	.unpin = mock_context_unpin,