drm/amdkfd: skip invalid pages during migrations

Invalid pages can be the result of pages that have already been
migrated due to the copy-on-write procedure, or of pages that were
never migrated to VRAM in the first place. This is no longer an
issue, as pranges now support mixed memory domains (CPU/GPU).

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
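
Editor's note on the core of this change: svm_migrate_copy_to_ram() now treats a source entry whose page is missing or is not a zone-device (VRAM) page as already resident in system memory. Instead of aborting with -ENOMEM, it flushes the contiguous batch of VRAM pages collected so far and skips ahead. Below is a minimal, self-contained user-space sketch of that skip-and-flush pattern; pages[], is_valid() and copy_batch() are hypothetical stand-ins for migrate->src[], is_zone_device_page() and svm_migrate_copy_memory_gart(), not the actual kernel interfaces.

/* Toy model of the skip-and-flush loop; 0 marks a page that is not in VRAM. */
#include <stdio.h>
#include <stdbool.h>

#define NPAGES 8

static const unsigned long pages[NPAGES] = { 1, 2, 0, 4, 5, 0, 7, 8 };

static bool is_valid(unsigned long p)		/* stand-in for is_zone_device_page() */
{
	return p != 0;
}

static void copy_batch(int first, int count)	/* stand-in for the GART copy helper */
{
	printf("copy pages [%d..%d]\n", first, first + count - 1);
}

int main(void)
{
	int i, j;	/* j counts valid pages accumulated since the last flush */

	for (i = 0, j = 0; i < NPAGES; i++) {
		if (!is_valid(pages[i])) {
			if (j) {		/* flush the batch built so far */
				copy_batch(i - j, j);
				j = 0;
			}
			continue;		/* skip the invalid page instead of failing */
		}
		j++;				/* valid page joins the current batch */
	}
	if (j)					/* flush the trailing batch, as the call after the loop does */
		copy_batch(i - j, j);
	return 0;
}

Resetting j to zero inside the invalid-page branch is what lets the batch restart cleanly at the next valid page, mirroring the j = 0 reset added by the patch.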
Alex Sierra 2021-05-12 10:54:58 -05:00 committed by Alex Deucher
parent 1d5dbfe6c0
commit 1ade5f84cc

@@ -372,7 +372,6 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	size_t size;
 	void *buf;
 	int r = -ENOMEM;
-	int retry = 0;
 
 	memset(&migrate, 0, sizeof(migrate));
 	migrate.vma = vma;
@@ -391,7 +390,6 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	migrate.dst = migrate.src + npages;
 	scratch = (dma_addr_t *)(migrate.dst + npages);
 
-retry:
 	r = migrate_vma_setup(&migrate);
 	if (r) {
 		pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
@@ -399,17 +397,9 @@ retry:
 		goto out_free;
 	}
 	if (migrate.cpages != npages) {
-		pr_debug("collect 0x%lx/0x%llx pages, retry\n", migrate.cpages,
+		pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
+			 migrate.cpages,
 			 npages);
-		migrate_vma_finalize(&migrate);
-		if (retry++ >= 3) {
-			r = -ENOMEM;
-			pr_debug("failed %d migrate svms 0x%p [0x%lx 0x%lx]\n",
-				 r, prange->svms, prange->start,
-				 prange->last);
-			goto out_free;
-		}
-		goto retry;
 	}
 
 	if (migrate.cpages) {
@@ -506,9 +496,8 @@ static void svm_migrate_page_free(struct page *page)
 static int
 svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			struct migrate_vma *migrate, struct dma_fence **mfence,
-			dma_addr_t *scratch)
+			dma_addr_t *scratch, uint64_t npages)
 {
-	uint64_t npages = migrate->cpages;
 	struct device *dev = adev->dev;
 	uint64_t *src;
 	dma_addr_t *dst;
@@ -525,15 +514,23 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 	src = (uint64_t *)(scratch + npages);
 	dst = scratch;
 
-	for (i = 0, j = 0; i < npages; i++, j++, addr += PAGE_SIZE) {
+	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
 		struct page *spage;
 
 		spage = migrate_pfn_to_page(migrate->src[i]);
-		if (!spage) {
-			pr_debug("failed get spage svms 0x%p [0x%lx 0x%lx]\n",
+		if (!spage || !is_zone_device_page(spage)) {
+			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
 				 prange->svms, prange->start, prange->last);
-			r = -ENOMEM;
-			goto out_oom;
+			if (j) {
+				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
+								 src + i - j, j,
+								 FROM_VRAM_TO_RAM,
+								 mfence);
+				if (r)
+					goto out_oom;
+				j = 0;
+			}
+			continue;
 		}
 		src[i] = svm_migrate_addr(adev, spage);
 		if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
@@ -566,6 +563,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 
 		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
 		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
+		j++;
 	}
 
 	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
@@ -624,7 +622,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 
 	if (migrate.cpages) {
 		r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
-					    scratch);
+					    scratch, npages);
 		migrate_vma_pages(&migrate);
 		svm_migrate_copy_done(adev, mfence);
 		migrate_vma_finalize(&migrate);