
drm/amdkfd: Move dma unmapping after TLB flush

Otherwise the GPU may access the stale mapping and generate an IOMMU
IO_PAGE_FAULT.

Move this inside p->mutex to prevent a race condition between multiple
threads mapping and unmapping concurrently.

With kfd_mem_dmaunmap_attachment removed from unmap_bo_from_gpuvm, it is
now called explicitly if mapping to GPUs fails, and before freeing the
mem attachment in case unmapping from GPUs fails.

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Philip Yang 2023-09-11 14:44:22 -04:00 committed by Alex Deucher
parent 08abccc9a7
commit 101b810430
3 changed files with 36 additions and 13 deletions
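
To make the ordering concrete before reading the diffs, here is a minimal user-space model of the hazard the commit message describes. Everything in it (the mapping struct, gpu_access, flush_tlb, dma_unmap) is invented for illustration and is not kernel or amdgpu API; it only shows why the DMA mapping must outlive any cached TLB translation.

/*
 * Illustrative model only -- none of these names are kernel APIs.
 * A "TLB entry" caches a translation; if the DMA mapping is torn
 * down while that cached translation is still valid, a device
 * access through it is exactly the stale access that triggers an
 * IOMMU IO_PAGE_FAULT. Flushing the TLB first closes the window.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
	void *dma_addr;  /* stands in for the IOMMU/DMA mapping */
	bool tlb_valid;  /* stands in for a cached GPU TLB entry */
};

static void gpu_access(const struct mapping *m)
{
	if (m->tlb_valid && !m->dma_addr)
		puts("IO_PAGE_FAULT: stale TLB translation, mapping gone");
	else if (!m->tlb_valid)
		puts("no cached translation: GPU re-walks the cleared page tables");
	else
		puts("access ok");
}

static void dma_unmap(struct mapping *m)
{
	free(m->dma_addr);
	m->dma_addr = NULL;
}

static void flush_tlb(struct mapping *m)
{
	m->tlb_valid = false;
}

int main(void)
{
	/* Old order: dma unmap before TLB flush leaves a fault window. */
	struct mapping m = { .dma_addr = malloc(4096), .tlb_valid = true };
	dma_unmap(&m);
	gpu_access(&m);		/* prints the IO_PAGE_FAULT line */
	flush_tlb(&m);

	/* New order (this patch): flush the TLB first, then dma unmap. */
	m = (struct mapping){ .dma_addr = malloc(4096), .tlb_valid = true };
	flush_tlb(&m);
	gpu_access(&m);		/* no stale translation left to use */
	dma_unmap(&m);
	return 0;
}

The same reasoning motivates keeping the whole unmap / TLB-flush / dmaunmap sequence under p->mutex in the ioctl change below: a concurrent mapping thread must not observe the intermediate state.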

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h

@@ -303,6 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
 					  struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
 		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c

@@ -733,7 +733,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
 	enum dma_data_direction dir;
 
 	if (unlikely(!ttm->sg)) {
-		pr_err("SG Table of BO is UNEXPECTEDLY NULL");
+		pr_debug("SG Table of BO is NULL");
 		return;
 	}
@@ -1202,8 +1202,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
 
 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
-
-	kfd_mem_dmaunmap_attachment(mem, entry);
 }
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
@@ -1258,6 +1256,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
 
 update_gpuvm_pte_failed:
 	unmap_bo_from_gpuvm(mem, entry, sync);
+	kfd_mem_dmaunmap_attachment(mem, entry);
 	return ret;
 }
 
@@ -1863,8 +1862,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
 		mem->va + bo_size * (1 + mem->aql_queue));
 
 	/* Remove from VM internal data structures */
-	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+	list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
+		kfd_mem_dmaunmap_attachment(mem, entry);
 		kfd_mem_detach(entry);
+	}
 
 	ret = unreserve_bo_and_vms(&ctx, false, false);
@@ -2038,6 +2039,23 @@ out:
 	return ret;
 }
 
+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+{
+	struct kfd_mem_attachment *entry;
+	struct amdgpu_vm *vm;
+
+	vm = drm_priv_to_vm(drm_priv);
+
+	mutex_lock(&mem->lock);
+
+	list_for_each_entry(entry, &mem->attachments, list) {
+		if (entry->bo_va->base.vm == vm)
+			kfd_mem_dmaunmap_attachment(mem, entry);
+	}
+
+	mutex_unlock(&mem->lock);
+}
+
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
 {

drivers/gpu/drm/amd/amdkfd/kfd_chardev.c

@@ -1432,17 +1432,21 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 			goto sync_memory_failed;
 		}
 	}
-	mutex_unlock(&p->mutex);
 
-	if (flush_tlb) {
-		/* Flush TLBs after waiting for the page table updates to complete */
-		for (i = 0; i < args->n_devices; i++) {
-			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
-			if (WARN_ON_ONCE(!peer_pdd))
-				continue;
+	/* Flush TLBs after waiting for the page table updates to complete */
+	for (i = 0; i < args->n_devices; i++) {
+		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+		if (WARN_ON_ONCE(!peer_pdd))
+			continue;
+
+		if (flush_tlb)
 			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
-		}
-	}
+
+		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
+		amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+	}
+	mutex_unlock(&p->mutex);
 
 	kfree(devices_arr);
 
 	return 0;