linux/drivers/gpu/drm/i915/selftests/mock_region.c
commit d22632c83b by Matthew Auld
drm/i915: support forcing the page size with lmem
For some specialised objects we might need something larger than the
region's min_page_size, due to some hw restriction. Slightly more hairy
is needing something smaller, with the guarantee that such objects will
never be inserted into any GTT, which is the case for the paging
structures.

This also fixes how we set up the BO page_alignment if we later migrate
the object somewhere else. For example, if the placements are {SMEM,
LMEM} we might get this wrong. Pushing the min_page_size behaviour into
the manager should fix this.

v2(Thomas): push the default page size behaviour into buddy_man, and let
the user override it with the page-alignment, which looks cleaner

v3: rebase on ttm sys changes

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210625103824.558481-1-matthew.auld@intel.com
2021-06-30 13:24:29 +01:00
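
To make the new behaviour concrete, here is a minimal, hypothetical sketch of how a selftest might drive the mock region below: force a 64K minimum page size on the region, then allocate an object that inherits that alignment. The four-argument i915_gem_object_create_region() call assumes the page_size parameter this patch threads through init_object(); the i915 pointer and the SZ_* constants are illustrative context, not code from this file.

	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;

	/* Hypothetical usage: 2G mock region, min page size forced to 64K */
	mem = mock_region_create(i915, 0, SZ_2G, SZ_64K, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	/* page_size == 0 leaves the region's min_page_size as the alignment */
	obj = i915_gem_object_create_region(mem, SZ_64K, 0, 0);
	if (IS_ERR(obj)) {
		intel_memory_region_put(mem);
		return PTR_ERR(obj);
	}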


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019-2021 Intel Corporation
 */

#include <drm/ttm/ttm_placement.h>
#include <linux/scatterlist.h>

#include "gem/i915_gem_region.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "mock_region.h"

/* Return the pages to the region: release the backing TTM resource, then
 * free the scatterlist that was built over it.
 */
static void mock_region_put_pages(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
	sg_free_table(pages);
	kfree(pages);
}

/* Allocate backing store from the region's TTM resource manager and expose
 * it to GEM as a scatterlist.
 */
static int mock_region_get_pages(struct drm_i915_gem_object *obj)
{
	unsigned int flags;
	struct sg_table *pages;
	int err;

	flags = 0;
	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
		flags |= TTM_PL_FLAG_CONTIGUOUS;

	obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
						      obj->base.size,
						      flags);
	if (IS_ERR(obj->mm.res))
		return PTR_ERR(obj->mm.res);

	pages = intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto err_free_resource;
	}

	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));

	return 0;

err_free_resource:
	intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
	return err;
}

static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
	.name = "mock-region",
	.get_pages = mock_region_get_pages,
	.put_pages = mock_region_put_pages,
	.release = i915_gem_object_release_memory_region,
};

static int mock_object_init(struct intel_memory_region *mem,
			    struct drm_i915_gem_object *obj,
			    resource_size_t size,
			    resource_size_t page_size,
			    unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;

	if (size > resource_size(&mem->region))
		return -E2BIG;

	/* The mock region imposes no hw restrictions, so the explicit
	 * page_size is unused here; the region's min_page_size applies
	 * via the resource manager.
	 */
	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);

	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}

static void mock_region_fini(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	int instance = mem->instance;

	intel_region_ttm_fini(mem);
	ida_free(&i915->selftest.mock_region_instances, instance);
}

static const struct intel_memory_region_ops mock_region_ops = {
	.init = intel_region_ttm_init,
	.release = mock_region_fini,
	.init_object = mock_object_init,
};

struct intel_memory_region *
mock_region_create(struct drm_i915_private *i915,
		   resource_size_t start,
		   resource_size_t size,
		   resource_size_t min_page_size,
		   resource_size_t io_start)
{
	/* Each mock region registers its own TTM manager, so cap the number
	 * of live instances at the private memory types TTM has spare.
	 */
	int instance = ida_alloc_max(&i915->selftest.mock_region_instances,
				     TTM_NUM_MEM_TYPES - TTM_PL_PRIV - 1,
				     GFP_KERNEL);

	if (instance < 0)
		return ERR_PTR(instance);

	return intel_memory_region_create(i915, start, size, min_page_size,
					  io_start, INTEL_MEMORY_MOCK, instance,
					  &mock_region_ops);
}
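
The matching teardown a caller would perform, again as a hedged sketch rather than code from this file: dropping the last region reference lands in mock_region_fini() above, which tears down the TTM manager and returns the instance to the IDA.

	/* Hypothetical cleanup for the snippet above */
	i915_gem_object_put(obj);
	intel_memory_region_put(mem);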