drm/i915/ttm: Update i915_gem_obj_copy_ttm() to be asynchronous
Update the copy function i915_gem_obj_copy_ttm() to be asynchronous for future users and update the only current user to sync the objects as needed after this function. Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Reviewed-by: Matthew Auld <matthew.auld@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20211122214554.371864-7-thomas.hellstrom@linux.intel.com
This commit is contained in:
parent
6385eb7ad8
commit
5652df829b
2 changed files with 30 additions and 12 deletions
|
@@ -826,33 +826,49 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
|
|||
.interruptible = intr,
|
||||
};
|
||||
struct i915_refct_sgt *dst_rsgt;
|
||||
struct dma_fence *copy_fence;
|
||||
int ret;
|
||||
struct dma_fence *copy_fence, *dep_fence;
|
||||
struct i915_deps deps;
|
||||
int ret, shared_err;
|
||||
|
||||
assert_object_held(dst);
|
||||
assert_object_held(src);
|
||||
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
|
||||
|
||||
/*
|
||||
* Sync for now. This will change with async moves.
|
||||
* We plan to add a shared fence only for the source. If that
|
||||
* fails, we await all source fences before commencing
|
||||
* the copy instead of only the exclusive.
|
||||
*/
|
||||
ret = ttm_bo_wait_ctx(dst_bo, &ctx);
|
||||
shared_err = dma_resv_reserve_shared(src_bo->base.resv, 1);
|
||||
ret = i915_deps_add_resv(&deps, dst_bo->base.resv, true, false, &ctx);
|
||||
if (!ret)
|
||||
ret = ttm_bo_wait_ctx(src_bo, &ctx);
|
||||
ret = i915_deps_add_resv(&deps, src_bo->base.resv,
|
||||
!!shared_err, false, &ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dep_fence = i915_deps_to_fence(&deps, &ctx);
|
||||
if (IS_ERR(dep_fence))
|
||||
return PTR_ERR(dep_fence);
|
||||
|
||||
dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource);
|
||||
copy_fence = __i915_ttm_move(src_bo, false, dst_bo->resource,
|
||||
dst_bo->ttm, dst_rsgt, allow_accel, NULL);
|
||||
dst_bo->ttm, dst_rsgt, allow_accel,
|
||||
dep_fence);
|
||||
|
||||
i915_refct_sgt_put(dst_rsgt);
|
||||
if (IS_ERR(copy_fence))
|
||||
return PTR_ERR(copy_fence);
|
||||
if (IS_ERR_OR_NULL(copy_fence))
|
||||
return PTR_ERR_OR_ZERO(copy_fence);
|
||||
|
||||
if (copy_fence) {
|
||||
dma_fence_wait(copy_fence, false);
|
||||
dma_fence_put(copy_fence);
|
||||
}
|
||||
dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
|
||||
|
||||
/* If we failed to reserve a shared slot, add an exclusive fence */
|
||||
if (shared_err)
|
||||
dma_resv_add_excl_fence(src_bo->base.resv, copy_fence);
|
||||
else
|
||||
dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
|
||||
|
||||
dma_fence_put(copy_fence);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -80,6 +80,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
|
|||
|
||||
err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
|
||||
GEM_WARN_ON(err);
|
||||
ttm_bo_wait_ctx(backup_bo, &ctx);
|
||||
|
||||
obj->ttm.backup = backup;
|
||||
return 0;
|
||||
|
@@ -170,6 +171,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
|
|||
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
|
||||
false);
|
||||
GEM_WARN_ON(err);
|
||||
ttm_bo_wait_ctx(backup_bo, &ctx);
|
||||
|
||||
obj->ttm.backup = NULL;
|
||||
err = 0;
|
||||
|
|
Loading…
Add table
Reference in a new issue