Merge tag 'drm-xe-fixes-2025-02-27' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

uAPI:
- OA uapi fix (Umesh)

Driver:
- Userptr related fixes (Auld)
- Remove a duplicated register entry (Mingong)
- Scheduler related fix to prevent exec races when freeing it (Tejas)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Z8CSqJre1VCjPXt2@intel.com
commit 6a5884f200
4 changed files with 38 additions and 10 deletions
drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -53,7 +53,6 @@
 #define RING_CTL(base)				XE_REG((base) + 0x3c)
 #define   RING_CTL_SIZE(size)			((size) - PAGE_SIZE) /* in bytes -> pages */
-#define   RING_CTL_SIZE(size)			((size) - PAGE_SIZE) /* in bytes -> pages */
 
 #define RING_START_UDW(base)			XE_REG((base) + 0x48)
drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1248,6 +1248,8 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
 	if (xe_exec_queue_is_lr(q))
 		cancel_work_sync(&ge->lr_tdr);
+	/* Confirm no work left behind accessing device structures */
+	cancel_delayed_work_sync(&ge->sched.base.work_tdr);
 	release_guc_id(guc, q);
 	xe_sched_entity_fini(&ge->entity);
 	xe_sched_fini(&ge->sched);
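Why the two added lines matter: the scheduler's job timeout (TDR) is a delayed work item that can still fire after exec queue teardown has begun, touching structures that are about to be freed. Below is a minimal userspace analogy of that pattern, not the kernel code itself: pthread_join() stands in for cancel_delayed_work_sync(), and all names are illustrative.

/*
 * Userspace sketch: a "delayed work" item may still be pending when we tear
 * down shared state, so we must synchronously drain it before freeing.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct delayed_work {
	pthread_t thread;
	int armed;
	int *device_state;	/* state the work dereferences when it runs */
};

static void *work_fn(void *arg)
{
	struct delayed_work *w = arg;

	usleep(1000);		/* the "delay" before the work runs */
	(*w->device_state)++;	/* use-after-free if the state is gone */
	return NULL;
}

static void cancel_work_sync_analogy(struct delayed_work *w)
{
	if (w->armed) {
		pthread_join(w->thread, NULL);	/* wait for the work to finish */
		w->armed = 0;
	}
}

int main(void)
{
	int *state = malloc(sizeof(*state));
	struct delayed_work w = { .armed = 1, .device_state = state };

	*state = 0;
	pthread_create(&w.thread, NULL, work_fn, &w);

	/* Without this, free(state) races with work_fn, as in the bug. */
	cancel_work_sync_analogy(&w);
	free(state);
	printf("teardown done, pending work drained first\n");
	return 0;
}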
drivers/gpu/drm/xe/xe_oa.c
@@ -1689,7 +1689,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
 	stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
 
 	stream->sample = param->sample;
-	stream->periodic = param->period_exponent > 0;
+	stream->periodic = param->period_exponent >= 0;
 	stream->period_exponent = param->period_exponent;
 	stream->no_preempt = param->no_preempt;
 	stream->wait_num_reports = param->wait_num_reports;
@@ -1970,6 +1970,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
 	}
 
 	param.xef = xef;
+	param.period_exponent = -1;
 	ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
 	if (ret)
 		return ret;
@@ -2024,7 +2025,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
 		goto err_exec_q;
 	}
 
-	if (param.period_exponent > 0) {
+	if (param.period_exponent >= 0) {
 		u64 oa_period, oa_freq_hz;
 
 		/* Requesting samples from OAG buffer is a privileged operation */
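The OA fix in the three hunks above: the old `> 0` checks treated a period exponent of 0 as "not periodic", even though 0 encodes the shortest valid sampling period; with `param.period_exponent` now initialized to -1 as the explicit "unset" sentinel, `>= 0` accepts it. A rough standalone sketch of the exponent-to-period relationship follows; the 19.2 MHz reference clock is an assumed example value and oa_exponent_to_ns here is a simplified stand-in for the driver's helper, not the driver code.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Simplified model: the OA timer period scales as 2^(exponent + 1) ticks
 * of the reference clock, rounded up to whole nanoseconds. */
static uint64_t oa_exponent_to_ns(uint32_t refclk_hz, int exponent)
{
	uint64_t nom = (2ULL << exponent) * NSEC_PER_SEC;

	return (nom + refclk_hz - 1) / refclk_hz;
}

int main(void)
{
	const uint32_t refclk_hz = 19200000;	/* assumed 19.2 MHz refclk */

	for (int exp = -1; exp <= 4; exp++) {
		if (exp < 0) {
			/* -1 is the new sentinel: sampling not requested */
			printf("exponent %2d: not periodic\n", exp);
			continue;
		}
		printf("exponent %2d: period %llu ns\n", exp,
		       (unsigned long long)oa_exponent_to_ns(refclk_hz, exp));
	}
	return 0;
}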
drivers/gpu/drm/xe/xe_vm.c
@@ -666,20 +666,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
 
 	/* Collect invalidated userptrs */
 	spin_lock(&vm->userptr.invalidated_lock);
+	xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
 				 userptr.invalidate_link) {
 		list_del_init(&uvma->userptr.invalidate_link);
-		list_move_tail(&uvma->userptr.repin_link,
-			       &vm->userptr.repin_list);
+		list_add_tail(&uvma->userptr.repin_link,
+			      &vm->userptr.repin_list);
 	}
 	spin_unlock(&vm->userptr.invalidated_lock);
 
-	/* Pin and move to temporary list */
+	/* Pin and move to bind list */
 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
 				 userptr.repin_link) {
 		err = xe_vma_userptr_pin_pages(uvma);
 		if (err == -EFAULT) {
 			list_del_init(&uvma->userptr.repin_link);
+			/*
+			 * We might have already done the pin once already, but
+			 * then had to retry before the re-bind happened, due
+			 * some other condition in the caller, but in the
+			 * meantime the userptr got dinged by the notifier such
+			 * that we need to revalidate here, but this time we hit
+			 * the EFAULT. In such a case make sure we remove
+			 * ourselves from the rebind list to avoid going down in
+			 * flames.
+			 */
+			if (!list_empty(&uvma->vma.combined_links.rebind))
+				list_del_init(&uvma->vma.combined_links.rebind);
 
 			/* Wait for pending binds */
 			xe_vm_lock(vm, false);
@@ -690,10 +703,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
 			err = xe_vm_invalidate_vma(&uvma->vma);
 			xe_vm_unlock(vm);
 			if (err)
-				return err;
+				break;
 		} else {
-			if (err < 0)
-				return err;
+			if (err)
+				break;
 
 			list_del_init(&uvma->userptr.repin_link);
 			list_move_tail(&uvma->vma.combined_links.rebind,
@@ -701,7 +714,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
 		}
 	}
 
-	return 0;
+	if (err) {
+		down_write(&vm->userptr.notifier_lock);
+		spin_lock(&vm->userptr.invalidated_lock);
+		list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+					 userptr.repin_link) {
+			list_del_init(&uvma->userptr.repin_link);
+			list_move_tail(&uvma->userptr.invalidate_link,
+				       &vm->userptr.invalidated);
+		}
+		spin_unlock(&vm->userptr.invalidated_lock);
+		up_write(&vm->userptr.notifier_lock);
+	}
+	return err;
 }
 
 /**
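The net effect of the last two hunks: instead of returning early and stranding entries on the repin list, errors now break out of the loop, and any leftover entries are moved back to the invalidated list so a later repin pass sees them again. A compact userspace model of that restore pattern follows; the miniature list helpers mirror the semantics of the kernel's <linux/list.h>, while the uvma struct and the simulated pin failure are purely illustrative.

#include <stdio.h>
#include <stddef.h>

/* Miniature versions of the kernel's intrusive list helpers. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->prev = h->next = h;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del_init(e);
	list_add_tail(e, h);
}

struct uvma {			/* illustrative stand-in for xe_userptr_vma */
	int id;
	struct list_head repin_link;
	struct list_head invalidate_link;
};

int main(void)
{
	struct list_head invalidated = LIST_HEAD_INIT(invalidated);
	struct list_head repin = LIST_HEAD_INIT(repin);
	struct uvma v[3];
	int err = 0;

	/* Collect phase: every invalidated userptr lands on the repin list. */
	for (int i = 0; i < 3; i++) {
		v[i].id = i;
		INIT_LIST_HEAD(&v[i].invalidate_link);
		list_add_tail(&v[i].repin_link, &repin);
	}

	/* Pin phase: pretend entry 1 hits -EFAULT; break instead of return. */
	while (repin.next != &repin) {
		struct uvma *u = container_of(repin.next, struct uvma,
					      repin_link);

		if (u->id == 1) {
			err = -14;	/* stand-in for -EFAULT */
			break;
		}
		list_del_init(&u->repin_link);	/* "pinned": off the list */
	}

	/* Error path from the hunk above: give leftovers back to the
	 * invalidated list so the next repin pass picks them up again. */
	if (err) {
		while (repin.next != &repin) {
			struct uvma *u = container_of(repin.next, struct uvma,
						      repin_link);

			list_del_init(&u->repin_link);
			list_move_tail(&u->invalidate_link, &invalidated);
		}
	}

	for (struct list_head *p = invalidated.next; p != &invalidated;
	     p = p->next)
		printf("uvma %d restored to invalidated list\n",
		       container_of(p, struct uvma, invalidate_link)->id);

	return err ? 1 : 0;
}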
@@ -1066,6 +1091,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
 
 		spin_lock(&vm->userptr.invalidated_lock);
+		xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
 		spin_unlock(&vm->userptr.invalidated_lock);
 	} else if (!xe_vma_is_null(vma)) {