There is an impedance mismatch between the scatterlist API, which uses unsigned int, and our memory/page accounting, which uses unsigned long. That is, we may try to create a scatterlist for a large object whose page count overflows the API, returning a small table into which we then try to fit very many pages. As the object size is under the control of userspace, we have to be prudent and catch the conversion errors.

To catch the implicit truncation, we check before calling the scatterlist creation APIs: we use the overflows_type() check and report -E2BIG if an overflow would occur. Where the caller cannot return an errno, we use WARN_ON() to report the problem. This is already used in our create ioctls to indicate if the uABI request is simply too large for the backing store. Failing that type check, we have a second check at sg_alloc_table() time to make sure the values we are passing into the scatterlist API are not truncated.

v2: Move the added i915_utils macro into the drm_util header (Jani N)
v5: Fix macros to be enclosed in parentheses for complex values
    Fix a too-long-line warning
v8: Replace safe_conversion() with check_assign() (Kees)
v14: Remove the shadowing macros of the scatterlist creation API and
     explicitly overflow-check where the scatterlist creation APIs are
     called. (Jani)
v15: Add the missing return of an error code when WARN_ON() has
     detected a problem. (Jani)

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Brian Welty <brian.welty@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Co-developed-by: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Signed-off-by: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Mauro Carvalho Chehab <mchehab@kernel.org>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221228192252.917299-3-gwan-gyeong.mun@intel.com
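For reference, here is a minimal userspace sketch of the truncation hazard described above. It is not i915 code and not part of the patch: the helper name build_table(), the 4 KiB page size and the 64-bit unsigned long are assumptions for illustration, and the plain comparison against UINT_MAX stands in for the kernel's overflows_type() check.

#include <limits.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* assume 4 KiB pages for the demo */

static int build_table(unsigned long obj_size)
{
	unsigned long count = obj_size >> DEMO_PAGE_SHIFT;
	unsigned int npages;	/* the width taken by the scatterlist API */

	/* Stand-in for overflows_type(count, npages): refuse to truncate. */
	if (count > UINT_MAX)
		return -1;	/* the kernel code reports -E2BIG here */

	npages = count;		/* now known to be a lossless assignment */
	printf("table sized for %u pages\n", npages);
	return 0;
}

int main(void)
{
	/* A 32 TiB object: 2^33 pages no longer fit in unsigned int. */
	if (build_table(32ull << 40))
		printf("rejected: object too large for the backing store\n");

	return build_table(1ul << 20);	/* a 1 MiB object is fine */
}

Without the guard, the assignment to npages would silently keep only the low 32 bits, and sg_alloc_table() would be asked for a table far smaller than the object it is meant to describe.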
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "i915_scatterlist.h"

#include "huge_gem_object.h"

static void huge_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	unsigned long nreal = obj->scratch / PAGE_SIZE;
	struct sgt_iter sgt_iter;
	struct page *page;

	for_each_sgt_page(page, sgt_iter, pages) {
		__free_page(page);
		if (!--nreal)
			break;
	}

	sg_free_table(pages);
	kfree(pages);
}

static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
	const unsigned long nreal = obj->scratch / PAGE_SIZE;
	unsigned int npages; /* restricted by sg_alloc_table */
	struct scatterlist *sg, *src, *end;
	struct sg_table *pages;
	unsigned long n;

	if (overflows_type(obj->base.size / PAGE_SIZE, npages))
		return -E2BIG;

	npages = obj->base.size / PAGE_SIZE;
	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	if (sg_alloc_table(pages, npages, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg = pages->sgl;
	for (n = 0; n < nreal; n++) {
		struct page *page;

		page = alloc_page(GFP | __GFP_HIGHMEM);
		if (!page) {
			sg_mark_end(sg);
			goto err;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = __sg_next(sg);
	}
	if (nreal < npages) {
		for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
			sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
			src = __sg_next(src);
			if (src == end)
				src = pages->sgl;
		}
	}

	if (i915_gem_gtt_prepare_pages(obj, pages))
		goto err;

	__i915_gem_object_set_pages(obj, pages);

	return 0;

err:
	huge_free_pages(obj, pages);
	return -ENOMEM;
#undef GFP
}

static void huge_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_free_pages(obj, pages);

	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops huge_ops = {
	.name = "huge-gem",
	.get_pages = huge_get_pages,
	.put_pages = huge_put_pages,
};

struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
		phys_addr_t phys_size,
		dma_addr_t dma_size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!phys_size || phys_size > dma_size);
	GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));

	if (overflows_type(dma_size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
	i915_gem_object_init(obj, &huge_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);
	obj->scratch = phys_size;

	return obj;
}
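For context, a hedged sketch of how a selftest might consume huge_gem_object(); the surrounding function, the chosen sizes and the exercise step are assumptions for illustration, not part of this file:

	struct drm_i915_gem_object *obj;

	/* A few real pages standing in for a 16 MiB GTT footprint. */
	obj = huge_gem_object(i915, 4 * PAGE_SIZE, SZ_16M);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... pin, map and exercise the object here ... */

	i915_gem_object_put(obj);

If dma_size were ever too large for obj->base.size to hold, the overflows_type() check above would hand back ERR_PTR(-E2BIG) rather than a silently truncated object.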