We really only need memcpy restore for objects that affect the
operability of the migrate context. That is, primarily the page-table
objects of the migrate VM.

Add an object flag, I915_BO_ALLOC_PM_EARLY, for objects that need early
restores using memcpy, and a way to assign LMEM page-table object flags
to be used by the vms.

Restore objects without this flag with the gpu blitter, and only objects
carrying the flag using TTM memcpy.

Initially mark the migrate, gt, gtt and vgpu vms to use this flag, and
defer for a later audit which vms actually need it. Most importantly,
user-allocated vms with pinned page-table objects can be restored using
the blitter.

Performance-wise, memcpy restore is probably as fast as gpu restore if
not faster, but using gpu restore will help tackle future restrictions
in mappable LMEM size.

v4:
- Don't mark the aliasing ppgtt page table flags for early resume, but
  rather the ggtt page table flags as intended. (Matthew Auld)
- The check for user buffer objects during early resume is pointless,
  since they are never marked I915_BO_ALLOC_PM_EARLY. (Matthew Auld)

v5:
- Mark GuC LMEM objects with I915_BO_ALLOC_PM_EARLY to have them
  restored before we fire up the migrate context.

Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210922062527.865433-8-thomas.hellstrom@linux.intel.com
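
To make the mechanism concrete, here is the flag-assignment side in
miniature (a sketch only: the call site and the exact flag combination
are illustrative, not hunks of this patch). An LMEM object that must be
operational before the migrate context is up gets tagged at creation
time:

	/* Sketch: tag an object for early (memcpy) restore on resume. */
	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_CONTIGUOUS |
					  I915_BO_ALLOC_PM_EARLY);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

Objects tagged this way are backed up and restored with TTM memcpy;
everything else is deferred to the gpu blitter once the migrate context
is operational.
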
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_pm.h"

/**
 * i915_ttm_backup_free - Free any backup attached to this object
 * @obj: The object whose backup is to be freed.
 */
void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.backup) {
		i915_gem_object_put(obj->ttm.backup);
		obj->ttm.backup = NULL;
	}
}

/**
 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for restore
 * @base: The i915_gem_apply_to_region we derive from.
 * @allow_gpu: Whether using the gpu blitter is allowed.
 * @backup_pinned: On backup, also back up pinned objects.
 */
struct i915_gem_ttm_pm_apply {
	struct i915_gem_apply_to_region base;
	bool allow_gpu : 1;
	bool backup_pinned : 1;
};

static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
			   struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_buffer_object *backup_bo;
	struct drm_i915_private *i915 =
		container_of(bo->bdev, typeof(*i915), bdev);
	struct drm_i915_gem_object *backup;
	struct ttm_operation_ctx ctx = {};
	int err = 0;

	/* Nothing to do if the object is already in smem or backed up. */
	if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
		return 0;

	/* Unpinned objects can simply be evicted to smem. */
	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
		return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);

	/*
	 * Skip pinned objects unless this pass backs them up. With the
	 * blitter allowed, also defer I915_BO_ALLOC_PM_EARLY objects, since
	 * they may still be in use (for example the migrate vm page tables)
	 * and are picked up by a later memcpy-only pass.
	 */
	if (!pm_apply->backup_pinned ||
	    (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
		return 0;

	/* Volatile content doesn't need to survive suspend. */
	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return 0;

	backup = i915_gem_object_create_shmem(i915, obj->base.size);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		goto out_no_lock;

	backup_bo = i915_gem_to_ttm(backup);
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (err)
		goto out_no_populate;

	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
	GEM_WARN_ON(err);

	obj->ttm.backup = backup;
	return 0;

out_no_populate:
	i915_gem_ww_unlock_single(backup);
out_no_lock:
	i915_gem_object_put(backup);

	return err;
}

static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);
	return 0;
}

/**
 * i915_ttm_recover_region - Free the backup of all objects of a region
 * @mr: The memory region
 *
 * Checks all objects of a region for an attached backup and, if one
 * exists, frees it. Typically this is called to recover from a partially
 * performed backup.
 */
void i915_ttm_recover_region(struct intel_memory_region *mr)
{
	static const struct i915_gem_apply_to_region_ops recover_ops = {
		.process_obj = i915_ttm_recover,
	};
	struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
	int ret;

	ret = i915_gem_process_region(mr, &apply);
	GEM_WARN_ON(ret);
}

/**
 * i915_ttm_backup_region - Back up all objects of a region to smem.
 * @mr: The memory region
 * @flags: TTM backup flags
 *
 * Loops over all objects of a region and either evicts them if they are
 * evictable or backs them up using a backup object if they are pinned.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops backup_ops = {
		.process_obj = i915_ttm_backup,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &backup_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
		.backup_pinned = flags & I915_TTM_BACKUP_PINNED,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
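
For context, a sketch of the suspend-side caller. The lmem_suspend()
helper below is a hypothetical mirror of how the i915 PM code can drive
this per region; it is not part of this file:

static int lmem_suspend(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int id, ret = 0;

	/* Back up every local-memory region with the given flags. */
	for_each_memory_region(mr, i915, id)
		if (mr->type == INTEL_MEMORY_LOCAL && !ret)
			ret = i915_ttm_backup_region(mr, flags);

	return ret;
}

A suspend sequence can then run passes of increasing strictness: first
lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU) to evict what the blitter
can, then adding I915_TTM_BACKUP_PINNED to also back up pinned objects,
and finally a memcpy-only lmem_suspend(i915, I915_TTM_BACKUP_PINNED)
pass that picks up the I915_BO_ALLOC_PM_EARLY objects deferred in
i915_ttm_backup() above.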

static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct drm_i915_gem_object *backup = obj->ttm.backup;
	struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
	struct ttm_operation_ctx ctx = {};
	int err;

	if (!backup)
		return 0;

	/* Without the gpu blitter, restore only early (memcpy) objects. */
	if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
		return 0;

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		return err;

	/* Content may have been swapped. */
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (!err) {
		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
					    false);
		GEM_WARN_ON(err);

		obj->ttm.backup = NULL;
		err = 0;
	}

	i915_gem_ww_unlock_single(backup);

	if (!err)
		i915_gem_object_put(backup);

	return err;
}

/**
 * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
 * @mr: The memory region
 * @flags: TTM backup flags
 *
 * Loops over all objects of a region and, if they are backed up, restores
 * them from smem.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops restore_ops = {
		.process_obj = i915_ttm_restore,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &restore_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
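
On resume the split runs in reverse (again a sketch; lmem_restore() is
a hypothetical mirror of the lmem_suspend() helper sketched earlier):

static int lmem_restore(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int id, ret = 0;

	/* Restore every local-memory region with the given flags. */
	for_each_memory_region(mr, i915, id)
		if (mr->type == INTEL_MEMORY_LOCAL && !ret)
			ret = i915_ttm_restore_region(mr, flags);

	return ret;
}

An early lmem_restore(i915, 0) call then brings back only the
I915_BO_ALLOC_PM_EARLY objects using memcpy, before the GT and the
migrate context are up; a second lmem_restore(i915,
I915_TTM_BACKUP_ALLOW_GPU) call afterwards restores the remaining
objects with the blitter.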