linux/drivers/gpu/drm/i915/gem/i915_gem_region.c
Dave Airlie 41d1d0c51f Merge tag 'drm-intel-gt-next-2021-04-06' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Driver Changes:

- Prepare for local/device memory support on DG1 by starting
  to use it for kernel internal allocations: context, ring
  and engine scratch (Matt A, CQ, Abdiel, Imre)
- Sandybridge fix to avoid hard hang on ring resume (Chris)
- Limit imported dma-buf size to int32 (Matt A)
- Double check heartbeat timeout before resetting (Chris)

- Use new tasklet API for execution list (Emil)
- Fix SPDX checkpatch warnings (Chris)
- Fixes for various checkpatch warnings (Chris)
- Selftest improvements (Chris)
- Move the defer_request waiter active assertion to correct spot (Chris)
- Make local-memory probing a GT operation (Matt, Tvrtko)
- Protect against request freeing during cancellation on wedging (Chris)
- Retire unexpected starting state error dumping (Chris)
- Distinction of memory regions in debugging (Zbigniew)
- Always flush the submission queue on checking for idle (Chris)

- Consolidate 2big error check to helper (Matt)
- Decrease number of subplatform bits (Tvrtko)
- Remove unused internal request priority levels (Chris)
- Document the unused internal header bits in buddy allocator (Matt)
- Cleanup the region class/instance encoding (Matt)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YGxksaZGXHnFxlwg@jlahtine-mobl.ger.corp.intel.com
2021-04-08 12:46:12 +10:00


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"
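
/*
 * Release the backing store of a region-backed object: return its buddy
 * blocks to the owning memory region and free the scatterlist that
 * described them.
 */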
void
i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	__intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);

	obj->mm.dirty = false;
	sg_free_table(pages);
	kfree(pages);
}
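
/*
 * Allocate backing store for the object from its region's buddy allocator
 * and describe the allocation as a scatterlist. Physically contiguous
 * blocks are coalesced into a single scatterlist entry; a new entry is
 * started only when the blocks are discontiguous or the running segment
 * would exceed i915_sg_segment_size().
 */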
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
	const u64 max_segment = i915_sg_segment_size();
	struct intel_memory_region *mem = obj->mm.region;
	struct list_head *blocks = &obj->mm.blocks;
	resource_size_t size = obj->base.size;
	resource_size_t prev_end;
	struct i915_buddy_block *block;
	unsigned int flags;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	flags = I915_ALLOC_MIN_PAGE_SIZE;
	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
		flags |= I915_ALLOC_CONTIGUOUS;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
	if (ret)
		goto err_free_sg;

	GEM_BUG_ON(list_empty(blocks));

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	prev_end = (resource_size_t)-1;

	list_for_each_entry(block, blocks, link) {
		u64 block_size, offset;

		block_size = min_t(u64, size,
				   i915_buddy_block_size(&mem->mm, block));
		offset = i915_buddy_block_offset(block);

		while (block_size) {
			u64 len;

			if (offset != prev_end || sg->length >= max_segment) {
				if (st->nents) {
					sg_page_sizes |= sg->length;
					sg = __sg_next(sg);
				}

				sg_dma_address(sg) = mem->region.start + offset;
				sg_dma_len(sg) = 0;
				sg->length = 0;
				st->nents++;
			}

			len = min(block_size, max_segment - sg->length);
			sg->length += len;
			sg_dma_len(sg) += len;

			offset += len;
			block_size -= len;

			prev_end = offset;
		}
	}

	sg_page_sizes |= sg->length;
	sg_mark_end(sg);
	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_free_sg:
	sg_free_table(st);
	kfree(st);
	return ret;
}
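
/*
 * Tie a newly created object to its memory region: take a reference on the
 * region, force contiguous allocation for objects no larger than the
 * region's minimum page size, and track the object on the region's
 * purgeable or regular object list.
 */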
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem)
{
	INIT_LIST_HEAD(&obj->mm.blocks);
	obj->mm.region = intel_memory_region_get(mem);

	if (obj->base.size <= mem->min_page_size)
		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;

	mutex_lock(&mem->objects.lock);

	if (obj->flags & I915_BO_ALLOC_VOLATILE)
		list_add(&obj->mm.region_link, &mem->objects.purgeable);
	else
		list_add(&obj->mm.region_link, &mem->objects.list);

	mutex_unlock(&mem->objects.lock);
}
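
/*
 * Undo i915_gem_object_init_memory_region(): unlink the object from the
 * region's object list and drop the region reference.
 */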
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);

	intel_memory_region_put(mem);
}
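
/*
 * Create a GEM object backed by the given memory region. The requested size
 * is rounded up to the region's minimum page size before the region-specific
 * init_object() hook sets the object up; backing pages are typically
 * allocated later, when the object's pages are first pinned.
 */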
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (!mem)
		return ERR_PTR(-ENODEV);

	size = round_up(size, mem->min_page_size);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	err = mem->ops->init_object(mem, obj, size, flags);
	if (err)
		goto err_object_free;

	trace_i915_gem_object_create(obj);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
	return ERR_PTR(err);
}
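
As a rough usage illustration (not part of the file above), a kernel-internal
caller might allocate an object from device-local memory along the following
lines. The i915->mm.regions[INTEL_REGION_LMEM] lookup and the helper name are
assumptions about the wider driver of this era rather than something this
file defines; treat this as a sketch, not driver code.

/* Hypothetical sketch: allocate a kernel-internal object from local memory. */
static struct drm_i915_gem_object *
example_create_lmem_object(struct drm_i915_private *i915, resource_size_t size)
{
	/* Assumed region lookup; the region may be absent on integrated parts. */
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_LMEM];

	if (!mem)
		return ERR_PTR(-ENODEV);

	/* Size is rounded up to mem->min_page_size by the call below. */
	return i915_gem_object_create_region(mem, size, 0);
}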