Patch series "Memory allocation profiling", v6.

Overview:
Low overhead [1] per-callsite memory allocation profiling. Not just for
debug kernels, overhead low enough to be deployed in production.

Example output:
  root@moria-kvm:~# sort -rn /proc/allocinfo
   127664128    31168 mm/page_ext.c:270 func:alloc_page_ext
    56373248     4737 mm/slub.c:2259 func:alloc_slab_page
    14880768     3633 mm/readahead.c:247 func:page_cache_ra_unbounded
    14417920     3520 mm/mm_init.c:2530 func:alloc_large_system_hash
    13377536      234 block/blk-mq.c:3421 func:blk_mq_alloc_rqs
    11718656     2861 mm/filemap.c:1919 func:__filemap_get_folio
     9192960     2800 kernel/fork.c:307 func:alloc_thread_stack_node
     4206592        4 net/netfilter/nf_conntrack_core.c:2567 func:nf_ct_alloc_hashtable
     4136960     1010 drivers/staging/ctagmod/ctagmod.c:20 [ctagmod] func:ctagmod_start
     3940352      962 mm/memory.c:4214 func:alloc_anon_folio
     2894464    22613 fs/kernfs/dir.c:615 func:__kernfs_new_node
   ...

Usage:
kconfig options:
 - CONFIG_MEM_ALLOC_PROFILING
 - CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
 - CONFIG_MEM_ALLOC_PROFILING_DEBUG
   adds warnings for allocations that weren't accounted because of a
   missing annotation

sysctl:
  /proc/sys/vm/mem_profiling

Runtime info:
  /proc/allocinfo

Notes:

[1]: Overhead
To measure the overhead we are comparing the following configurations:
(1) Baseline with CONFIG_MEMCG_KMEM=n
(2) Disabled by default (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=n)
(3) Enabled by default (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=y)
(4) Enabled at runtime (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=n && /proc/sys/vm/mem_profiling=1)
(5) Baseline with CONFIG_MEMCG_KMEM=y && allocating with __GFP_ACCOUNT
(6) Disabled by default (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=n) && CONFIG_MEMCG_KMEM=y
(7) Enabled by default (CONFIG_MEM_ALLOC_PROFILING=y &&
    CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=y) && CONFIG_MEMCG_KMEM=y

Performance overhead:
To evaluate performance we implemented an in-kernel test executing
multiple get_free_page/free_page and kmalloc/kfree calls with allocation
sizes growing from 8 to 240 bytes with CPU frequency set to max and CPU
affinity set to a specific CPU to minimize the noise. Below are results
from running the test on Ubuntu 22.04.2 LTS with 6.8.0-rc1 kernel on
56 core Intel Xeon:

                        kmalloc               pgalloc
(1 baseline)            6.764s                16.902s
(2 default disabled)    6.793s  (+0.43%)      17.007s (+0.62%)
(3 default enabled)     7.197s  (+6.40%)      23.666s (+40.02%)
(4 runtime enabled)     7.405s  (+9.48%)      23.901s (+41.41%)
(5 memcg)               13.388s (+97.94%)     48.460s (+186.71%)
(6 def disabled+memcg)  13.332s (+97.10%)     48.105s (+184.61%)
(7 def enabled+memcg)   13.446s (+98.78%)     54.963s (+225.18%)

Memory overhead:
Kernel size:

        text        data        bss         dec         diff
(1) 26515311    18890222    17018880    62424413
(2) 26524728    19423818    16740352    62688898    264485
(3) 26524724    19423818    16740352    62688894    264481
(4) 26524728    19423818    16740352    62688898    264485
(5) 26541782    18964374    16957440    62463596    39183

Memory consumption on a 56 core Intel CPU with 125GB of memory:
Code tags:        192 kB
PageExts:      262144 kB (256MB)
SlabExts:        9876 kB (9.6MB)
PcpuExts:         512 kB (0.5MB)

Total overhead is 0.2% of total memory.
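As an aside (not part of this series), below is a minimal userspace sketch
that sorts /proc/allocinfo by allocated bytes, assuming the
"<bytes> <calls> <location> func:<name>" layout shown in the example output
above; the program name, the entry limit and the top-10 cutoff are arbitrary
choices for illustration:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long long bytes;
	unsigned long long calls;
	char loc[256];
};

static struct entry entries[4096];

/* Sort descending by bytes. */
static int cmp(const void *a, const void *b)
{
	const struct entry *x = a, *y = b;

	return (y->bytes > x->bytes) - (y->bytes < x->bytes);
}

int main(void)
{
	char line[512];
	size_t n = 0, i;
	FILE *f = fopen("/proc/allocinfo", "r");

	if (!f)
		return 1;

	/* Skip any line that does not match the expected three-column layout. */
	while (n < 4096 && fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%llu %llu %255[^\n]",
			   &entries[n].bytes, &entries[n].calls,
			   entries[n].loc) == 3)
			n++;
	}
	fclose(f);

	qsort(entries, n, sizeof(entries[0]), cmp);

	for (i = 0; i < n && i < 10; i++)
		printf("%12llu %8llu %s\n",
		       entries[i].bytes, entries[i].calls, entries[i].loc);

	return 0;
}

Functionally this mirrors the "sort -rn /proc/allocinfo" invocation shown in
the example output.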
Benchmarks:

Hackbench tests run 100 times:
hackbench -s 512 -l 200 -g 15 -f 25 -P
      baseline       disabled profiling     enabled profiling
avg   0.3543         0.3559 (+0.0016)       0.3566 (+0.0023)
stdev 0.0137         0.0188                 0.0077

hackbench -l 10000
      baseline       disabled profiling     enabled profiling
avg   6.4218         6.4306 (+0.0088)       6.5077 (+0.0859)
stdev 0.0933         0.0286                 0.0489

stress-ng tests:
stress-ng --class memory --seq 4 -t 60
stress-ng --class cpu --seq 4 -t 60
Results posted at: https://evilpiepirate.org/~kent/memalloc_prof_v4_stress-ng/

[2] https://lore.kernel.org/all/20240306182440.2003814-1-surenb@google.com/


This patch (of 37):

The next patch drops vmalloc.h from a system header in order to fix a
circular dependency; this adds it to all the files that were pulling it in
implicitly.

[kent.overstreet@linux.dev: fix arch/alpha/lib/memcpy.c]
  Link: https://lkml.kernel.org/r/20240327002152.3339937-1-kent.overstreet@linux.dev
[surenb@google.com: fix arch/x86/mm/numa_32.c]
  Link: https://lkml.kernel.org/r/20240402180933.1663992-1-surenb@google.com
[kent.overstreet@linux.dev: a few places were depending on sizes.h]
  Link: https://lkml.kernel.org/r/20240404034744.1664840-1-kent.overstreet@linux.dev
[arnd@arndb.de: fix mm/kasan/hw_tags.c]
  Link: https://lkml.kernel.org/r/20240404124435.3121534-1-arnd@kernel.org
[surenb@google.com: fix arc build]
  Link: https://lkml.kernel.org/r/20240405225115.431056-1-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-1-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-2-surenb@google.com
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <drm/drm_cache.h>
#include <linux/vmalloc.h>

#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		WARN_ON_ONCE(IS_DGFX(i915));
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
	GEM_BUG_ON(!obj->mm.page_sizes.phys);

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		return obj->ops->truncate(obj);

	return 0;
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_gt *gt;
	int id;

	for_each_gt(gt, i915, id) {
		if (!obj->mm.tlb[id])
			continue;

		intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
		obj->mm.tlb[id] = 0;
	}
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	if (!i915_gem_object_has_self_managed_shrink_list(obj))
		i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	flush_tlb_invalidate(obj);

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem using a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return ERR_PTR(-EINVAL);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete our CPU mappings needs to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 *	- If the object can be placed in device local-memory, then the
	 *	  pages should be allocated and mapped as write-combined only.
	 *
	 *	- Everything else is always allocated and mapped as write-back,
	 *	  with the guarantee that everything is also coherent with the
	 *	  GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		err = i915_gem_object_wait_moving_fence(obj, true);
		if (err) {
			ptr = ERR_PTR(err);
			goto err_unpin;
		}

		if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset)

{
	const bool dma = iter == &obj->mm.get_dma_page ||
			 iter == &obj->ttm.get_io_page;
	unsigned int idx, count;
	struct scatterlist *sg;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				      pgoff_t n, unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
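
/*
 * Illustrative usage sketch -- NOT part of the file above.  It shows the
 * pin/map, flush, unpin pattern implemented by the helpers in this file.
 * The function name and the written value are made up for the example;
 * it assumes a valid, CPU-mappable object.
 */
static int example_cpu_write(struct drm_i915_gem_object *obj, u32 value)
{
	u32 *vaddr;

	/* Takes the object lock, pins the pages and maps them write-back. */
	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	vaddr[0] = value;

	/* Flush the CPU write for non-coherent mappings. */
	__i915_gem_object_flush_map(obj, 0, sizeof(*vaddr));

	/* Drop the pin taken by pin_map; the cached mapping may persist. */
	i915_gem_object_unpin_map(obj);

	return 0;
}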