drm/i915: Merge wait_for_timelines with retire_request
wait_for_timelines is essentially the same loop as retiring requests
(with an extra timeout), so merge the two into one routine.

v2: i915_retire_requests_timeout and keep VT'd w/a as !interruptible

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-10-chris@chris-wilson.co.uk
parent 33d856445b
commit f33a8a5160

14 changed files with 50 additions and 96 deletions
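Taken together, the hunks below fold the old wait_for_timelines() walk into request retirement. A sketch of the resulting contract, distilled from the diff rather than quoted from the patch (the authoritative semantics are in the i915_request.c hunks further down):

/*
 * i915_retire_requests_timeout(i915, timeout):
 *   timeout > 0  - retire while waiting (interruptibly) on each active
 *                  timeline's last request; returns the remaining budget
 *                  while timelines stay active, or 0 once idle.
 *   timeout == 0 - retire completed requests only, never wait.
 *   timeout < 0  - as above but uninterruptible for -timeout jiffies,
 *                  which is how v2 keeps the VT'd w/a !interruptible.
 */
long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);

/* i915_gem_wait_for_idle() loses its flags argument accordingly: */
int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout);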
drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -432,9 +432,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
 	/* Attempt to reap some mmap space from dead objects */
 	do {
-		err = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE,
-					     MAX_SCHEDULE_TIMEOUT);
+		err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 		if (err)
 			break;
drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -59,9 +59,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
 {
 	bool result = !intel_gt_is_wedged(gt);
 
-	if (i915_gem_wait_for_idle(gt->i915,
-				   I915_WAIT_FOR_IDLE_BOOST,
-				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+	if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
 		/* XXX hide warning from gem_eio */
 		if (i915_modparams.reset) {
 			dev_err(gt->i915->drm.dev,
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1137,7 +1137,7 @@ out:
 
 	if ((flags & TEST_IDLE) && ret == 0) {
 		ret = i915_gem_wait_for_idle(ce->engine->i915,
-					     0, MAX_SCHEDULE_TIMEOUT);
+					     MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;
 
drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -196,8 +196,7 @@ int intel_gt_resume(struct intel_gt *gt)
 
 static void wait_for_idle(struct intel_gt *gt)
 {
-	if (i915_gem_wait_for_idle(gt->i915, 0,
-				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+	if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
 		/*
 		 * Forcibly cancel outstanding work and leave
 		 * the gpu quiet.
drivers/gpu/drm/i915/i915_debugfs.c
@@ -3635,9 +3635,7 @@ i915_drop_caches_set(void *data, u64 val)
 	i915_retire_requests(i915);
 
 	if (val & (DROP_IDLE | DROP_ACTIVE)) {
-		ret = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE,
-					     MAX_SCHEDULE_TIMEOUT);
+		ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;
 	}
drivers/gpu/drm/i915/i915_drv.h
@@ -2321,8 +2321,7 @@ void i915_gem_driver_register(struct drm_i915_private *i915);
 void i915_gem_driver_unregister(struct drm_i915_private *i915);
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
 void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-			   unsigned int flags, long timeout);
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
drivers/gpu/drm/i915/i915_gem.c
@@ -883,61 +883,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 	}
 }
 
-static long
-wait_for_timelines(struct intel_gt *gt, unsigned int wait, long timeout)
-{
-	struct intel_gt_timelines *timelines = &gt->timelines;
-	struct intel_timeline *tl;
-	unsigned long flags;
-
-	spin_lock_irqsave(&timelines->lock, flags);
-	list_for_each_entry(tl, &timelines->active_list, link) {
-		struct dma_fence *fence;
-
-		fence = i915_active_fence_get(&tl->last_request);
-		if (!fence)
-			continue;
-
-		spin_unlock_irqrestore(&timelines->lock, flags);
-
-		if (!dma_fence_is_i915(fence)) {
-			timeout = dma_fence_wait_timeout(fence,
-							 flags & I915_WAIT_INTERRUPTIBLE,
-							 timeout);
-		} else {
-			struct i915_request *rq = to_request(fence);
-
-			/*
-			 * "Race-to-idle".
-			 *
-			 * Switching to the kernel context is often used as
-			 * a synchronous step prior to idling, e.g. in suspend
-			 * for flushing all current operations to memory before
-			 * sleeping. These we want to complete as quickly as
-			 * possible to avoid prolonged stalls, so allow the gpu
-			 * to boost to maximum clocks.
-			 */
-			if (flags & I915_WAIT_FOR_IDLE_BOOST)
-				gen6_rps_boost(rq);
-
-			timeout = i915_request_wait(rq, flags, timeout);
-		}
-
-		dma_fence_put(fence);
-		if (timeout < 0)
-			return timeout;
-
-		/* restart after reacquiring the lock */
-		spin_lock_irqsave(&timelines->lock, flags);
-		tl = list_entry(&timelines->active_list, typeof(*tl), link);
-	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
-
-	return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
-			   unsigned int flags, long timeout)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
 {
 	struct intel_gt *gt = &i915->gt;
 
@@ -945,18 +891,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 	if (!intel_gt_pm_is_awake(gt))
 		return 0;
 
-	do {
-		timeout = wait_for_timelines(gt, flags, timeout);
-		if (timeout < 0)
-			return timeout;
-
+	while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
 		cond_resched();
 		if (signal_pending(current))
 			return -EINTR;
+	}
 
-	} while (i915_retire_requests(i915));
-
-	return 0;
+	return timeout;
 }
 
 struct i915_vma *
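Reassembled from the two hunks above, the post-patch i915_gem_wait_for_idle() reduces to a drain loop. This is a sketch, not verbatim kernel source (context lines the diff elides are paraphrased):

int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
{
	struct intel_gt *gt = &i915->gt;

	/* If the GT is already asleep, there is nothing left to retire. */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	/*
	 * Each pass retires what has completed and waits on whatever is
	 * still active; a positive return means busy timelines remain
	 * and some of the wait budget is left over.
	 */
	while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout; /* 0 once idle, or a negative error such as -ETIME */
}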
drivers/gpu/drm/i915/i915_gem_evict.c
@@ -46,9 +46,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	return i915_gem_wait_for_idle(i915,
-				      I915_WAIT_INTERRUPTIBLE,
-				      MAX_SCHEDULE_TIMEOUT);
+	return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 }
 
 static bool
@@ -126,6 +124,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, color,
 				    start, end, mode);
 
+	i915_retire_requests(vm->i915);
+
 search_again:
 	active = NULL;
 	INIT_LIST_HEAD(&eviction_list);
@@ -264,13 +264,13 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 
 	trace_i915_gem_evict_node(vm, target, flags);
 
-	/* Retire before we search the active list. Although we have
+	/*
+	 * Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
 	 * a stray pin (preventing eviction) that can only be resolved by
 	 * retiring.
 	 */
-	if (!(flags & PIN_NONBLOCK))
-		i915_retire_requests(vm->i915);
+	i915_retire_requests(vm->i915);
 
 	if (i915_vm_has_cache_coloring(vm)) {
 		/* Expand search to cover neighbouring guard pages (or lack!) */
drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2528,7 +2528,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+		/* XXX This does not prevent more requests being submitted! */
+		if (i915_retire_requests_timeout(dev_priv,
+						 -MAX_SCHEDULE_TIMEOUT)) {
 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
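This is the one call site that relies on the sign convention: passing a negative value preserves the old uninterruptible wait for the VT'd workaround, per the v2 note. Spelled out with hypothetical calls (only the last has a real counterpart, in the hunk above):

i915_retire_requests_timeout(i915, HZ);                     /* interruptible, ~1s budget */
i915_retire_requests_timeout(i915, 0);                      /* retire only, never wait */
i915_retire_requests_timeout(i915, -HZ);                    /* uninterruptible, ~1s budget */
i915_retire_requests_timeout(i915, -MAX_SCHEDULE_TIMEOUT);  /* uninterruptible, unbounded */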
drivers/gpu/drm/i915/i915_request.c
@@ -1508,13 +1508,19 @@ out:
 	return timeout;
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915)
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout)
 {
 	struct intel_gt_timelines *timelines = &i915->gt.timelines;
 	struct intel_timeline *tl, *tn;
+	unsigned long active_count = 0;
 	unsigned long flags;
+	bool interruptible;
 	LIST_HEAD(free);
 
+	interruptible = true;
+	if (timeout < 0)
+		timeout = -timeout, interruptible = false;
+
 	spin_lock_irqsave(&timelines->lock, flags);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex))
@@ -1525,13 +1531,27 @@ bool i915_retire_requests(struct drm_i915_private *i915)
 		tl->active_count++; /* pin the list element */
 		spin_unlock_irqrestore(&timelines->lock, flags);
 
+		if (timeout > 0) {
+			struct dma_fence *fence;
+
+			fence = i915_active_fence_get(&tl->last_request);
+			if (fence) {
+				timeout = dma_fence_wait_timeout(fence,
+								 interruptible,
+								 timeout);
+				dma_fence_put(fence);
+			}
+		}
+
 		retire_requests(tl);
 
 		spin_lock_irqsave(&timelines->lock, flags);
 
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
-		if (!--tl->active_count)
+		if (--tl->active_count)
+			active_count += !!rcu_access_pointer(tl->last_request.fence);
+		else
 			list_del(&tl->link);
 
 		mutex_unlock(&tl->mutex);
@@ -1547,7 +1567,7 @@ bool i915_retire_requests(struct drm_i915_private *i915)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	return !list_empty(&timelines->active_list);
+	return active_count ? timeout : 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
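The `return active_count ? timeout : 0;` above encodes the new contract that replaces the old bool; a gloss (my reading of the hunks, not the author's words):

/*
 * 0           - no timeline still carries an unretired last_request;
 *               the GT looks idle to the retirement walker.
 * timeout > 0 - active timelines remain and budget is left, so callers
 *               such as i915_gem_wait_for_idle() loop with the rest.
 * timeout < 0 - a fence wait failed (e.g. -ETIME) while work remained;
 *               the error propagates out through the caller's loop.
 */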
drivers/gpu/drm/i915/i915_request.h
@@ -310,7 +310,6 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_INTERRUPTIBLE	BIT(0)
 #define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
 #define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
 static inline bool i915_request_signaled(const struct i915_request *rq)
 {
@@ -460,6 +459,10 @@ i915_request_active_timeline(struct i915_request *rq)
 			   lockdep_is_held(&rq->engine->active.lock));
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915);
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);
+static inline void i915_retire_requests(struct drm_i915_private *i915)
+{
+	i915_retire_requests_timeout(i915, 0);
+}
 
 #endif /* I915_REQUEST_H */
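Because the compatibility wrapper returns void, callers that looped on the old boolean (which reported whether active timelines remained) need an explicit budget instead; mock_device_flush() at the end of this diff takes the second form. An illustrative before/after, not code from the patch:

/* before: loop while active timelines remain after retiring */
while (i915_retire_requests(i915))
	;

/* after: one call with an unbounded, interruptible budget */
i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT);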
drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -18,8 +18,7 @@ int igt_flush_test(struct drm_i915_private *i915)
 
 	cond_resched();
 
-	i915_retire_requests(i915);
-	if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
+	if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) {
 		pr_err("%pS timed out, cancelling all further testing.\n",
 		       __builtin_return_address(0));
@@ -30,7 +29,6 @@ int igt_flush_test(struct drm_i915_private *i915)
 		intel_gt_set_wedged(&i915->gt);
 		ret = -EIO;
 	}
-	i915_retire_requests(i915);
 
 	return ret;
 }
drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -23,9 +23,7 @@ int igt_live_test_begin(struct igt_live_test *t,
 	t->func = func;
 	t->name = name;
 
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE,
-				     MAX_SCHEDULE_TIMEOUT);
+	err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 	if (err) {
 		pr_err("%s(%s): failed to idle before, with err=%d!",
 		       func, name, err);
drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -44,7 +44,7 @@ void mock_device_flush(struct drm_i915_private *i915)
 	do {
 		for_each_engine(engine, i915, id)
 			mock_engine_flush(engine);
-	} while (i915_retire_requests(i915));
+	} while (i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT));
 }
 
 static void mock_device_release(struct drm_device *dev)