drm/i915/gem: Limit struct_mutex to eb_reserve
We only need to serialise the multiple pinning during the eb_reserve
phase. Ideally this would be using the vm->mutex as an outer lock, or
using a composite global mutex (ww_mutex), but at the moment we are
using struct_mutex for the group.
Closes: https://gitlab.freedesktop.org/drm/intel/issues/1381
Fixes: 003d8b9143 ("drm/i915/gem: Only call eb_lookup_vma once during execbuf ioctl")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200306071614.2846708-3-chris@chris-wilson.co.uk
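For orientation, the locking flow this patch gives eb_reserve() can be condensed as below. This is a simplified sketch assembled from the hunks that follow, not the complete function: the priority re-sort, the pass-based eviction switch and the pin_flags setup are elided, so pin_flags here stands in for the value computed earlier in the real function.

static int eb_reserve(struct i915_execbuffer *eb)
{
        struct eb_vma *ev;
        int err = 0;

        /* Serialise the multiple pinning passes under one coarse lock. */
        if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
                return -EINTR;

        do {
                list_for_each_entry(ev, &eb->unbound, bind_link) {
                        err = eb_reserve_vma(eb, ev, pin_flags);
                        if (err)
                                break;
                }
                if (!(err == -ENOSPC || err == -EAGAIN))
                        break;  /* everything pinned, or a hard error */

                if (err == -EAGAIN) {
                        /* Drop the lock only while waiting on userptr workers. */
                        mutex_unlock(&eb->i915->drm.struct_mutex);
                        flush_workqueue(eb->i915->mm.userptr_wq);
                        mutex_lock(&eb->i915->drm.struct_mutex);
                        continue;
                }

                /* -ENOSPC: evict from the vm and retry, or give up (elided). */
        } while (1);

        mutex_unlock(&eb->i915->drm.struct_mutex);
        return err;
}

The net effect, visible in the hunks below, is that struct_mutex is acquired and released entirely inside eb_reserve() rather than across the whole execbuf ioctl, and is dropped around flush_workqueue() while waiting for outstanding userptr work.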
parent 26fc4e4ba1
commit ef398881d2
2 changed files with 20 additions and 37 deletions
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -611,7 +611,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 	struct list_head last;
 	struct eb_vma *ev;
 	unsigned int i, pass;
-	int err;
+	int err = 0;
 
 	/*
 	 * Attempt to pin all of the buffers into the GTT.
@@ -627,8 +627,10 @@ static int eb_reserve(struct i915_execbuffer *eb)
 	 * room for the earlier objects *unless* we need to defragment.
 	 */
 
+	if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
+		return -EINTR;
+
 	pass = 0;
-	err = 0;
 	do {
 		list_for_each_entry(ev, &eb->unbound, bind_link) {
 			err = eb_reserve_vma(eb, ev, pin_flags);
@@ -636,7 +638,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 				break;
 		}
 		if (!(err == -ENOSPC || err == -EAGAIN))
-			return err;
+			break;
 
 		/* Resort *all* the objects into priority order */
 		INIT_LIST_HEAD(&eb->unbound);
@@ -667,7 +669,9 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		list_splice_tail(&last, &eb->unbound);
 
 		if (err == -EAGAIN) {
+			mutex_unlock(&eb->i915->drm.struct_mutex);
 			flush_workqueue(eb->i915->mm.userptr_wq);
+			mutex_lock(&eb->i915->drm.struct_mutex);
 			continue;
 		}
 
@@ -681,15 +685,20 @@ static int eb_reserve(struct i915_execbuffer *eb)
 			err = i915_gem_evict_vm(eb->context->vm);
 			mutex_unlock(&eb->context->vm->mutex);
 			if (err)
-				return err;
+				goto unlock;
 			break;
 
 		default:
-			return -ENOSPC;
+			err = -ENOSPC;
+			goto unlock;
 		}
 
 		pin_flags = PIN_USER;
 	} while (1);
+
+unlock:
+	mutex_unlock(&eb->i915->drm.struct_mutex);
+	return err;
 }
 
 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
@@ -1632,7 +1641,6 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb)
 
 static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
 {
-	struct drm_device *dev = &eb->i915->drm;
 	bool have_copy = false;
 	struct eb_vma *ev;
 	int err = 0;
@@ -1643,8 +1651,6 @@ repeat:
 		goto out;
 	}
 
-	mutex_unlock(&dev->struct_mutex);
-
 	/*
 	 * We take 3 passes through the slowpatch.
 	 *
@@ -1667,21 +1673,8 @@ repeat:
 		cond_resched();
 		err = 0;
 	}
-	if (err) {
-		mutex_lock(&dev->struct_mutex);
+	if (err)
 		goto out;
-	}
-
-	/* A frequent cause for EAGAIN are currently unavailable client pages */
-	flush_workqueue(eb->i915->mm.userptr_wq);
-
-	err = i915_mutex_lock_interruptible(dev);
-	if (err) {
-		mutex_lock(&dev->struct_mutex);
-		goto out;
-	}
-
-	GEM_BUG_ON(!eb->batch);
 
 	list_for_each_entry(ev, &eb->relocs, reloc_link) {
 		if (!have_copy) {
@@ -1739,9 +1732,11 @@ static int eb_relocate(struct i915_execbuffer *eb)
 	if (err)
 		return err;
 
-	err = eb_reserve(eb);
-	if (err)
-		return err;
+	if (!list_empty(&eb->unbound)) {
+		err = eb_reserve(eb);
+		if (err)
+			return err;
+	}
 
 	/* The objects are in their final locations, apply the relocations. */
 	if (eb->args->flags & __EXEC_HAS_RELOC) {
@@ -2691,10 +2686,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_context;
 
-	err = i915_mutex_lock_interruptible(dev);
-	if (err)
-		goto err_engine;
-
 	err = eb_relocate(&eb);
 	if (err) {
 		/*
@@ -2838,8 +2829,6 @@ err_vma:
 	eb_release_vmas(&eb);
 	if (eb.trampoline)
 		i915_vma_unpin(eb.trampoline);
-	mutex_unlock(&dev->struct_mutex);
-err_engine:
 	eb_unpin_engine(&eb);
 err_context:
 	i915_gem_context_put(eb.gem_context);
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1734,12 +1734,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
-static inline int __must_check
-i915_mutex_lock_interruptible(struct drm_device *dev)
-{
-	return mutex_lock_interruptible(&dev->struct_mutex);
-}
-
 int i915_gem_dumb_create(struct drm_file *file_priv,
                          struct drm_device *dev,
                          struct drm_mode_create_dumb *args);