drm/xe: Invalidate userptr VMA on page pin fault
Rather than returning an error to the user or banning the VM when a
userptr VMA page pin fails with -EFAULT, invalidate the VMA's mappings.
This supports the UMD use case of freeing a userptr while still having
bindings.
Now that non-faulting VMs can invalidate VMAs, drop the usm prefix for
the tile_invalidated member.
v2:
- Fix build error (CI)
v3:
- Don't invalidate VMA if in fault mode, rather kill VM (Thomas)
- Update commit message with tile_invalidated name change (Thomas)
- Wait on VM bookkeep slots with the VM resv lock held (Thomas)
v4:
- Move list_del_init(&userptr.repin_link) after error check (Thomas)
- Assert not in fault mode (Matthew)
Fixes: dd08ebf6c3 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240312183907.933835-1-matthew.brost@intel.com
4 files changed, 29 insertions(+), 16 deletions(-)
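For illustration, a minimal standalone model of the policy change
(every type and helper below is a hypothetical stand-in, not the
driver's API): a pin that fails with -EFAULT because the UMD freed the
userptr now invalidates the mapping rather than failing the pin pass,
while other errors still propagate.

#include <errno.h>
#include <stdio.h>

struct model_uvma {
	int backing_freed;	/* UMD already freed the userptr range */
	int invalidated;	/* GPU mappings zapped */
};

static int model_pin_pages(struct model_uvma *uvma)
{
	return uvma->backing_freed ? -EFAULT : 0;
}

static int model_userptr_pin(struct model_uvma *uvma)
{
	int err = model_pin_pages(uvma);

	if (err == -EFAULT) {
		/* wait for pending binds, then zap the GPU mappings */
		uvma->invalidated = 1;
		return 0;	/* no longer an error for the VM */
	}
	return err;		/* real errors still fail the pin pass */
}

int main(void)
{
	struct model_uvma uvma = { .backing_freed = 1 };
	int err = model_userptr_pin(&uvma);

	printf("pin: %d, invalidated: %d\n", err, uvma.invalidated);
	return 0;
}

In fault mode the driver instead kills the VM, hence the new xe_assert
that xe_vm_userptr_pin() only runs for non-faulting VMs (see the
xe_vm.c hunks below).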
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
 static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 {
 	return BIT(tile->id) & vma->tile_present &&
-		!(BIT(tile->id) & vma->usm.tile_invalidated);
+		!(BIT(tile->id) & vma->tile_invalidated);
 }
 
 static bool vma_matches(struct xe_vma *vma, u64 page_addr)
@@ -226,7 +226,7 @@ retry_userptr:
 
 	if (xe_vma_is_userptr(vma))
 		ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
-	vma->usm.tile_invalidated &= ~BIT(tile->id);
+	vma->tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
 	drm_exec_fini(&exec);
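The vma_is_valid() check above composes two per-tile bitmasks: a
binding is usable on a tile only if it is present there and has not
been invalidated there since the last (re)bind. A standalone sketch of
the same logic (model types only, not driver code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

struct model_vma {
	uint8_t tile_present;		/* tiles with page tables bound */
	uint8_t tile_invalidated;	/* tiles whose mappings were zapped */
};

static bool model_vma_is_valid(uint8_t tile_id, const struct model_vma *vma)
{
	return (BIT(tile_id) & vma->tile_present) &&
	       !(BIT(tile_id) & vma->tile_invalidated);
}

int main(void)
{
	struct model_vma vma = { .tile_present = BIT(0) | BIT(1) };

	assert(model_vma_is_valid(0, &vma));
	vma.tile_invalidated |= BIT(0);	/* userptr pages went away on tile 0 */
	assert(!model_vma_is_valid(0, &vma));
	assert(model_vma_is_valid(1, &vma));
	return 0;
}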
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -468,7 +468,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
 	     TP_ARGS(vma)
 );
 
-DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
 	     TP_PROTO(struct xe_vma *vma),
 	     TP_ARGS(vma)
 );
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -708,6 +708,7 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
 	int err = 0;
 	LIST_HEAD(tmp_evict);
 
+	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
 	lockdep_assert_held_write(&vm->lock);
 
 	/* Collect invalidated userptrs */
@@ -724,11 +725,27 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
 				 userptr.repin_link) {
 		err = xe_vma_userptr_pin_pages(uvma);
-		if (err < 0)
-			return err;
+		if (err == -EFAULT) {
+			list_del_init(&uvma->userptr.repin_link);
 
-		list_del_init(&uvma->userptr.repin_link);
-		list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
+			/* Wait for pending binds */
+			xe_vm_lock(vm, false);
+			dma_resv_wait_timeout(xe_vm_resv(vm),
+					      DMA_RESV_USAGE_BOOKKEEP,
+					      false, MAX_SCHEDULE_TIMEOUT);
+
+			err = xe_vm_invalidate_vma(&uvma->vma);
+			xe_vm_unlock(vm);
+			if (err)
+				return err;
+		} else {
+			if (err < 0)
+				return err;
+
+			list_del_init(&uvma->userptr.repin_link);
+			list_move_tail(&uvma->vma.combined_links.rebind,
+				       &vm->rebind_list);
+		}
 	}
 
 	return 0;
@@ -2024,7 +2041,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 		return err;
 	}
 
-	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
 				  true, first_op, last_op);
 	} else {
@@ -3214,9 +3231,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 	u8 id;
 	int ret;
 
-	xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
 	xe_assert(xe, !xe_vma_is_null(vma));
-	trace_xe_vma_usm_invalidate(vma);
+	trace_xe_vma_invalidate(vma);
 
 	/* Check that we don't race with page-table updates */
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
@@ -3254,7 +3270,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 		}
 	}
 
-	vma->usm.tile_invalidated = vma->tile_mask;
+	vma->tile_invalidated = vma->tile_mask;
 
 	return 0;
 }
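The repin loop above waits on the VM reservation object's
DMA_RESV_USAGE_BOOKKEEP fences before calling xe_vm_invalidate_vma().
A minimal sketch of the presumed rationale (not stated in the patch):
an in-flight bind that completes after the zap could re-establish
stale mappings, so the wait must come first. All names below are
hypothetical model code, not driver API:

#include <assert.h>
#include <stdbool.h>

struct model_vm {
	int pending_binds;	/* stand-in for unsignaled bookkeep fences */
	bool ptes_zapped;
};

static void model_wait_bookkeep(struct model_vm *vm)
{
	/* models dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, ...) */
	while (vm->pending_binds)
		vm->pending_binds--;	/* pretend the fences signal */
}

static void model_invalidate(struct model_vm *vm)
{
	model_wait_bookkeep(vm);	/* order: wait first ... */
	vm->ptes_zapped = true;		/* ... then zap, so no in-flight bind
					 * can re-materialize stale mappings */
}

int main(void)
{
	struct model_vm vm = { .pending_binds = 2 };

	model_invalidate(&vm);
	assert(!vm.pending_binds && vm.ptes_zapped);
	return 0;
}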
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -84,11 +84,8 @@ struct xe_vma {
 		struct work_struct destroy_work;
 	};
 
-	/** @usm: unified shared memory state */
-	struct {
-		/** @tile_invalidated: VMA has been invalidated */
-		u8 tile_invalidated;
-	} usm;
+	/** @tile_invalidated: VMA has been invalidated */
+	u8 tile_invalidated;
 
 	/** @tile_mask: Tile mask of where to create binding for this VMA */
 	u8 tile_mask;