
drm/msm/gem: Unpin buffers earlier

We've already attached the fences, so obj->resv (which the shrinker checks)
tells us whether the buffers are still active.  So we can unpin sooner,
before we drop the queue lock.

This also avoids the need to grab the obj lock in the retire path,
avoiding potential lock contention between submit and retire.
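For illustration only (not part of this patch): a minimal sketch of the reservation-object check the message alludes to, i.e. answering "is this buffer still active?" from obj->resv once the submit's fences have been attached. The helper name and the dma_resv_usage value are assumptions, not taken from the driver.

#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

/*
 * Illustrative sketch: once the submit has attached its fences to
 * obj->resv, the "still active?" question can be answered from the
 * reservation object itself, so the pin no longer needs to be held
 * all the way to retire.  (Helper name and usage flag are assumed.)
 */
static bool example_gem_busy(struct drm_gem_object *obj)
{
	/* True while any unsignaled fence remains attached to the resv. */
	return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
}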

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/496132/
Link: https://lore.kernel.org/r/20220802155152.1727594-12-robdclark@gmail.com
Author: Rob Clark <robdclark@chromium.org>
Date:   2022-08-02 08:51:44 -07:00
Parent: b352ba54a8
Commit: f371bcc0c2

@@ -501,11 +501,11 @@ out:
  */
 static void submit_cleanup(struct msm_gem_submit *submit, bool error)
 {
-	unsigned cleanup_flags = BO_LOCKED;
+	unsigned cleanup_flags = BO_LOCKED | BO_OBJ_PINNED;
 	unsigned i;
 
 	if (error)
-		cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED;
+		cleanup_flags |= BO_VMA_PINNED;
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -522,10 +522,6 @@ void msm_submit_retire(struct msm_gem_submit *submit)
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
-		msm_gem_lock(obj);
-		/* Note, VMA already fence-unpinned before submit: */
-		submit_cleanup_bo(submit, i, BO_OBJ_PINNED);
-		msm_gem_unlock(obj);
 		drm_gem_object_put(obj);
 	}
 }
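
To see how the flag change in the first hunk moves the unpin earlier, here is a hedged sketch of a flags-driven per-BO cleanup helper in the spirit of the driver's submit_cleanup_bo() (exact body assumed, not copied): with BO_OBJ_PINNED now part of the default cleanup_flags, the object unpin happens in submit_cleanup(), before the queue lock is dropped, rather than in msm_submit_retire().

/* Sketch only, not the driver's exact code: unwind per-BO state by flags. */
static void example_cleanup_bo(struct msm_gem_submit *submit, int i,
			       unsigned cleanup_flags)
{
	struct drm_gem_object *obj = &submit->bos[i].obj->base;
	unsigned flags = submit->bos[i].flags & cleanup_flags;

	/* Clear the bits first so a racing path doesn't unwind twice. */
	submit->bos[i].flags &= ~cleanup_flags;

	if (flags & BO_VMA_PINNED)
		msm_gem_unpin_vma(submit->bos[i].vma);	/* assumed helper/field */

	if (flags & BO_OBJ_PINNED)
		msm_gem_unpin_locked(obj);		/* assumed helper */

	if (flags & BO_LOCKED)
		dma_resv_unlock(obj->resv);
}

With the unpin folded into submit_cleanup(), the second hunk can drop the lock/unpin sequence from the retire loop entirely, which is what removes the obj-lock contention mentioned in the commit message.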