
drm/xe: Avoid doing rebinds

If we don't change page sizes we can avoid doing rebinds and instead just
do a partial unbind. The algorithm to determine the page size is greedy:
we assume all pages in the removed VMA use the largest page size used in
the VMA.

v2: Don't exceed 100 lines
v3: struct xe_vma_op_unmap removal moved to a different patch, remove XXX comment

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Matthew Brost 2023-07-19 14:46:01 -07:00, committed by Rodrigo Vivi
parent 3188c0f4c8
commit 8f33b4f054
3 changed files with 70 additions and 10 deletions
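Before the diff, here is a minimal, self-contained sketch of the arithmetic the new skip_prev/skip_next handling performs. The addresses and variable names below are made up for illustration and this is plain user-space C, not driver code; only the alignment rule and the range bookkeeping correspond to what the xe_vm.c hunks below implement.

#include <stdint.h>
#include <stdio.h>

#define SZ_1M 0x100000ULL
#define SZ_2M (2 * SZ_1M)

int main(void)
{
        /* Old VMA bound with 2M PTEs covers [0, 6M); a new map lands on [2M, 4M). */
        uint64_t old_start = 0, old_size = 6 * SZ_1M;
        uint64_t prev_end = 2 * SZ_1M;          /* end of the surviving prev piece   */
        uint64_t next_start = 4 * SZ_1M;        /* start of the surviving next piece */
        uint64_t max_pte = SZ_2M;               /* greedy guess: largest PTE in the VMA */
        uint64_t unbind_start = old_start, unbind_range = old_size;

        /* prev ends on a max-PTE boundary: keep its binding, shrink the unbind. */
        if (prev_end % max_pte == 0) {
                unbind_range -= prev_end - old_start;
                unbind_start = prev_end;
        }
        /* next starts on a max-PTE boundary: keep its binding as well. */
        if (next_start % max_pte == 0)
                unbind_range -= (old_start + old_size) - next_start;

        /* Only the overwritten middle [2M, 4M) is actually unbound and rebound. */
        printf("unbind [0x%llx, 0x%llx)\n",
               (unsigned long long)unbind_start,
               (unsigned long long)(unbind_start + unbind_range));
        return 0;
}

In the driver itself the userptr case is excluded from both skips, since rebinding a userptr always creates a new SG mapping (see the comments in the hunks below).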

drivers/gpu/drm/xe/xe_pt.c

@@ -668,6 +668,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
                 if (!is_null)
                         xe_res_next(curs, next - addr);
                 xe_walk->va_curs_start = next;
+                xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
                 *action = ACTION_CONTINUE;
 
                 return ret;
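The single added line above records which leaf page size the bind used at each level of the page-table walk. Because the three new flags occupy consecutive bits (see the xe_vm_types.h hunk at the end of this commit), shifting XE_VMA_PTE_4K by the level picks the matching flag: with Xe's 4K-granule page tables, levels 0, 1 and 2 map 4K, 2M and 1G pages. A small compile-and-run sketch of just that relationship; the DRM_GPUVA_USERBITS value here is a placeholder, not the real definition.

#include <assert.h>

#define DRM_GPUVA_USERBITS      (1 << 0)        /* placeholder, not the real value */
#define XE_VMA_PTE_4K           (DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M           (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G           (DRM_GPUVA_USERBITS << 7)

int main(void)
{
        /* Leaf entries at levels 0/1/2 map 4K/2M/1G pages respectively. */
        assert((XE_VMA_PTE_4K << 0) == XE_VMA_PTE_4K);
        assert((XE_VMA_PTE_4K << 1) == XE_VMA_PTE_2M);
        assert((XE_VMA_PTE_4K << 2) == XE_VMA_PTE_1G);
        return 0;
}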

drivers/gpu/drm/xe/xe_vm.c

@@ -2396,6 +2396,16 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
         return vma;
 }
 
+static u64 xe_vma_max_pte_size(struct xe_vma *vma)
+{
+        if (vma->gpuva.flags & XE_VMA_PTE_1G)
+                return SZ_1G;
+        else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+                return SZ_2M;
+
+        return SZ_4K;
+}
+
 /*
  * Parse operations list and create any resources needed for the operations
  * prior to fully committing to the operations. This setup can fail.
@@ -2472,6 +2482,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
                         break;
                 }
                 case DRM_GPUVA_OP_REMAP:
+                {
+                        struct xe_vma *old =
+                                gpuva_to_vma(op->base.remap.unmap->va);
+
+                        op->remap.start = xe_vma_start(old);
+                        op->remap.range = xe_vma_size(old);
+
                         if (op->base.remap.prev) {
                                 struct xe_vma *vma;
                                 bool read_only =
@@ -2490,6 +2507,20 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
                                 }
 
                                 op->remap.prev = vma;
+
+                                /*
+                                 * Userptr creates a new SG mapping so
+                                 * we must also rebind.
+                                 */
+                                op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+                                        IS_ALIGNED(xe_vma_end(vma),
+                                                   xe_vma_max_pte_size(old));
+                                if (op->remap.skip_prev) {
+                                        op->remap.range -=
+                                                xe_vma_end(vma) -
+                                                xe_vma_start(old);
+                                        op->remap.start = xe_vma_end(vma);
+                                }
                         }
 
                         if (op->base.remap.next) {
@@ -2511,14 +2542,21 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
                                 }
 
                                 op->remap.next = vma;
-                        }
 
-                        /* XXX: Support no doing remaps */
-                        op->remap.start =
-                                xe_vma_start(gpuva_to_vma(op->base.remap.unmap->va));
-                        op->remap.range =
-                                xe_vma_size(gpuva_to_vma(op->base.remap.unmap->va));
+                                /*
+                                 * Userptr creates a new SG mapping so
+                                 * we must also rebind.
+                                 */
+                                op->remap.skip_next = !xe_vma_is_userptr(old) &&
+                                        IS_ALIGNED(xe_vma_start(vma),
+                                                   xe_vma_max_pte_size(old));
+                                if (op->remap.skip_next)
+                                        op->remap.range -=
+                                                xe_vma_end(old) -
+                                                xe_vma_start(vma);
+                        }
                         break;
+                }
                 case DRM_GPUVA_OP_UNMAP:
                 case DRM_GPUVA_OP_PREFETCH:
                         /* Nothing to do */
@@ -2561,10 +2599,23 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
         case DRM_GPUVA_OP_REMAP:
                 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
                                  true);
-                if (op->remap.prev)
+
+                if (op->remap.prev) {
                         err |= xe_vm_insert_vma(vm, op->remap.prev);
-                if (op->remap.next)
+                        if (!err && op->remap.skip_prev)
+                                op->remap.prev = NULL;
+                }
+                if (op->remap.next) {
                         err |= xe_vm_insert_vma(vm, op->remap.next);
+                        if (!err && op->remap.skip_next)
+                                op->remap.next = NULL;
+                }
+
+                /* Adjust for partial unbind after removin VMA from VM */
+                if (!err) {
+                        op->base.remap.unmap->va->va.addr = op->remap.start;
+                        op->base.remap.unmap->va->va.range = op->remap.range;
+                }
                 break;
         case DRM_GPUVA_OP_UNMAP:
                 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
@@ -2634,9 +2685,10 @@ again:
                 bool next = !!op->remap.next;
 
                 if (!op->remap.unmap_done) {
-                        vm->async_ops.munmap_rebind_inflight = true;
-                        if (prev || next)
+                        if (prev || next) {
+                                vm->async_ops.munmap_rebind_inflight = true;
                                 vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
+                        }
                         err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
                                            op->num_syncs,
                                            !prev && !next ? op->fence : NULL,

drivers/gpu/drm/xe/xe_vm_types.h

@@ -30,6 +30,9 @@ struct xe_vm;
 #define XE_VMA_ATOMIC_PTE_BIT   (DRM_GPUVA_USERBITS << 2)
 #define XE_VMA_FIRST_REBIND     (DRM_GPUVA_USERBITS << 3)
 #define XE_VMA_LAST_REBIND      (DRM_GPUVA_USERBITS << 4)
+#define XE_VMA_PTE_4K           (DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_2M           (DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_1G           (DRM_GPUVA_USERBITS << 7)
 
 struct xe_vma {
         /** @gpuva: Base GPUVA object */
@@ -336,6 +339,10 @@ struct xe_vma_op_remap {
         u64 start;
         /** @range: range of the VMA unmap */
         u64 range;
+        /** @skip_prev: skip prev rebind */
+        bool skip_prev;
+        /** @skip_next: skip next rebind */
+        bool skip_next;
         /** @unmap_done: unmap operation in done */
         bool unmap_done;
 };
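Putting the pieces together, the decision made in vm_bind_ioctl_ops_parse() reduces to one predicate per surviving piece of the remapped VMA. The recap below is illustrative only: it uses a plain C stand-in struct instead of struct xe_vma and spells IS_ALIGNED out as a modulo check.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the few struct xe_vma fields the check needs. */
struct vma_view {
        uint64_t start, end;    /* [start, end) of the VMA                          */
        bool is_userptr;        /* userptr rebinds always create a new SG mapping   */
        uint64_t max_pte;       /* greedy result, as in xe_vma_max_pte_size() above */
};

/* The prev piece keeps its binding only if its end sits on a max-PTE boundary. */
static bool skip_prev_rebind(const struct vma_view *old, const struct vma_view *prev)
{
        return !old->is_userptr && (prev->end % old->max_pte) == 0;
}

/* The next piece keeps its binding only if its start sits on a max-PTE boundary. */
static bool skip_next_rebind(const struct vma_view *old, const struct vma_view *next)
{
        return !old->is_userptr && (next->start % old->max_pte) == 0;
}

int main(void)
{
        struct vma_view old  = { 0x000000, 0x600000, false, 0x200000 };
        struct vma_view prev = { 0x000000, 0x200000, false, 0x200000 };
        struct vma_view next = { 0x400000, 0x600000, false, 0x200000 };

        /* Both boundaries are 2M-aligned, so neither piece needs a rebind. */
        return skip_prev_rebind(&old, &prev) && skip_next_rebind(&old, &next) ? 0 : 1;
}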