It's unclear what reference the initial vma kref reference refers to. A vma can have multiple weak references, the object vma list, the vm's bound list and the GT's closed_list, and the initial vma reference can be put from lookups of all these lists. With the current implementation this means that any holder of yet another vma refcount (currently only i915_gem_object_unbind()) needs to be holding two of either *) An object refcount, *) A vm open count *) A vma open count in order for us to not risk leaking a reference by having the initial vma reference being put twice. Address this by re-introducing i915_vma_destroy() which removes all weak references of the vma and *then* puts the initial vma refcount. This makes a strong vma reference hold on to the vma unconditionally. Perhaps a better name would be i915_vma_revoke() or i915_vma_zombify(), since other callers may still hold a refcount, but with the prospect of being able to replace the vma refcount with the object lock in the near future, let's stick with i915_vma_destroy(). Finally this commit fixes a race in that previously i915_vma_release() and now i915_vma_destroy() could destroy a vma without taking the vm->mutex after an advisory check that the vma mm_node was not allocated. This would race with the ungrab_vma() function creating a trace similar to the below one. This was fixed in one of the __i915_vma_put() callsites in commitbc1922e5d3
("drm/i915: Fix a race between vma / object destruction and unbinding") but although not seemingly triggered by CI, that is not sufficient. This patch is needed to fix that properly. [823.012188] Console: switching to colour dummy device 80x25 [823.012422] [IGT] gem_ppgtt: executing [823.016667] [IGT] gem_ppgtt: starting subtest blt-vs-render-ctx0 [852.436465] stack segment: 0000 [#1] PREEMPT SMP NOPTI [852.436480] CPU: 0 PID: 3200 Comm: gem_ppgtt Not tainted 5.16.0-CI-CI_DRM_11115+ #1 [852.436489] Hardware name: Intel Corporation Alder Lake Client Platform/AlderLake-P DDR5 RVP, BIOS ADLPFWI1.R00.2422.A00.2110131104 10/13/2021 [852.436499] RIP: 0010:ungrab_vma+0x9/0x80 [i915] [852.436711] Code: ef e8 4b 85 cf e0 e8 36 a3 d6 e0 8b 83 f8 9c 00 00 85 c0 75 e1 5b 5d 41 5c 41 5d c3 e9 d6 fd 14 00 55 53 48 8b af c0 00 00 00 <8b> 45 00 85 c0 75 03 5b 5d c3 48 8b 85 a0 02 00 00 48 89 fb 48 8b [852.436727] RSP: 0018:ffffc90006db7880 EFLAGS: 00010246 [852.436734] RAX: 0000000000000000 RBX: ffffc90006db7598 RCX: 0000000000000000 [852.436742] RDX: ffff88815349e898 RSI: ffff88815349e858 RDI: ffff88810a284140 [852.436748] RBP: 6b6b6b6b6b6b6b6b R08: ffff88815349e898 R09: ffff88815349e8e8 [852.436754] R10: 0000000000000001 R11: 0000000051ef1141 R12: ffff88810a284140 [852.436762] R13: 0000000000000000 R14: ffff88815349e868 R15: ffff88810a284458 [852.436770] FS: 00007f5c04b04e40(0000) GS:ffff88849f000000(0000) knlGS:0000000000000000 [852.436781] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [852.436788] CR2: 00007f5c04b38fe0 CR3: 000000010a6e8001 CR4: 0000000000770ef0 [852.436797] PKRU: 55555554 [852.436801] Call Trace: [852.436806] <TASK> [852.436811] i915_gem_evict_for_node+0x33c/0x3c0 [i915] [852.437014] i915_gem_gtt_reserve+0x106/0x130 [i915] [852.437211] i915_vma_pin_ww+0x8f4/0xb60 [i915] [852.437412] eb_validate_vmas+0x688/0x860 [i915] [852.437596] i915_gem_do_execbuffer+0xc0e/0x25b0 [i915] [852.437770] ? deactivate_slab+0x5f2/0x7d0 [852.437778] ? 
_raw_spin_unlock_irqrestore+0x50/0x60 [852.437789] ? i915_gem_execbuffer2_ioctl+0xc6/0x2c0 [i915] [852.437944] ? init_object+0x49/0x80 [852.437950] ? __lock_acquire+0x5e6/0x2580 [852.437963] i915_gem_execbuffer2_ioctl+0x116/0x2c0 [i915] [852.438129] ? i915_gem_do_execbuffer+0x25b0/0x25b0 [i915] [852.438300] drm_ioctl_kernel+0xac/0x140 [852.438310] drm_ioctl+0x201/0x3d0 [852.438316] ? i915_gem_do_execbuffer+0x25b0/0x25b0 [i915] [852.438490] __x64_sys_ioctl+0x6a/0xa0 [852.438498] do_syscall_64+0x37/0xb0 [852.438507] entry_SYSCALL_64_after_hwframe+0x44/0xae [852.438515] RIP: 0033:0x7f5c0415b317 [852.438523] Code: b3 66 90 48 8b 05 71 4b 2d 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 41 4b 2d 00 f7 d8 64 89 01 48 [852.438542] RSP: 002b:00007ffd765039a8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [852.438553] RAX: ffffffffffffffda RBX: 000055e4d7829dd0 RCX: 00007f5c0415b317 [852.438562] RDX: 00007ffd76503a00 RSI: 00000000c0406469 RDI: 0000000000000017 [852.438571] RBP: 00007ffd76503a00 R08: 0000000000000000 R09: 0000000000000081 [852.438579] R10: 00000000ffffff7f R11: 0000000000000246 R12: 00000000c0406469 [852.438587] R13: 0000000000000017 R14: 00007ffd76503a00 R15: 0000000000000000 [852.438598] </TASK> [852.438602] Modules linked in: snd_hda_codec_hdmi i915 mei_hdcp x86_pkg_temp_thermal snd_hda_intel snd_intel_dspcfg drm_buddy coretemp crct10dif_pclmul crc32_pclmul snd_hda_codec ttm ghash_clmulni_intel snd_hwdep snd_hda_core e1000e drm_dp_helper ptp snd_pcm mei_me drm_kms_helper pps_core mei syscopyarea sysfillrect sysimgblt fb_sys_fops prime_numbers intel_lpss_pci smsc75xx usbnet mii [852.440310] ---[ end trace e52cdd2fe4fd911c ]--- v2: Fix typos in the commit message. Fixes:7e00897be8
("drm/i915: Add object locking to i915_gem_evict_for_node and i915_gem_evict_something, v2.") Fixes:bc1922e5d3
("drm/i915: Fix a race between vma / object destruction and unbinding") Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Reviewed-by: Matthew Auld <matthew.auld@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20220222133209.587978-1-thomas.hellstrom@linux.intel.com
810 lines
23 KiB
C
810 lines
23 KiB
C
/*
|
|
* Copyright © 2017 Intel Corporation
|
|
*
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
* to deal in the Software without restriction, including without limitation
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
*
|
|
* The above copyright notice and this permission notice (including the next
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
* Software.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
* IN THE SOFTWARE.
|
|
*
|
|
*/
|
|
|
|
#include <linux/sched/mm.h>
|
|
|
|
#include <drm/drm_cache.h>
|
|
|
|
#include "display/intel_frontbuffer.h"
|
|
#include "pxp/intel_pxp.h"
|
|
|
|
#include "i915_drv.h"
|
|
#include "i915_file_private.h"
|
|
#include "i915_gem_clflush.h"
|
|
#include "i915_gem_context.h"
|
|
#include "i915_gem_dmabuf.h"
|
|
#include "i915_gem_mman.h"
|
|
#include "i915_gem_object.h"
|
|
#include "i915_gem_ttm.h"
|
|
#include "i915_memcpy.h"
|
|
#include "i915_trace.h"
|
|
|
|
static struct kmem_cache *slab_objects;
|
|
|
|
static const struct drm_gem_object_funcs i915_gem_object_funcs;
|
|
|
|
struct drm_i915_gem_object *i915_gem_object_alloc(void)
|
|
{
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
|
|
if (!obj)
|
|
return NULL;
|
|
obj->base.funcs = &i915_gem_object_funcs;
|
|
|
|
return obj;
|
|
}
|
|
|
|
void i915_gem_object_free(struct drm_i915_gem_object *obj)
|
|
{
|
|
return kmem_cache_free(slab_objects, obj);
|
|
}
|
|
|
|
/**
 * i915_gem_object_init - Initialise a freshly allocated GEM object
 * @obj: The object to initialise; the embedded drm_gem_object is assumed
 *	 to have been initialised by the caller.
 * @ops: Backend operations used for this object type.
 * @key: Lockdep class key (not referenced in this function; presumably
 *	 consumed by callers or a wrapper — confirm at call sites).
 * @flags: I915_BO_ALLOC_* flags; any other bit trips a GEM_BUG_ON.
 */
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
	/*
	 * A gem object is embedded both in a struct ttm_buffer_object :/ and
	 * in a drm_i915_gem_object. Make sure they are aliased.
	 */
	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
		     offsetof(typeof(*obj), __do_not_access.base));

	/* Per-object vma list and its protecting spinlock. */
	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	/* Per-file handle lookup entries attached to this object. */
	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	/* mmap offset nodes, keyed in an rbtree. */
	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	/* Page/dma-page iterator caches; radix trees populated lazily. */
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}
|
|
|
|
/**
|
|
* __i915_gem_object_fini - Clean up a GEM object initialization
|
|
* @obj: The gem object to cleanup
|
|
*
|
|
* This function cleans up gem object fields that are set up by
|
|
* drm_gem_private_object_init() and i915_gem_object_init().
|
|
* It's primarily intended as a helper for backends that need to
|
|
* clean up the gem object in separate steps.
|
|
*/
|
|
void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
|
|
{
|
|
mutex_destroy(&obj->mm.get_page.lock);
|
|
mutex_destroy(&obj->mm.get_dma_page.lock);
|
|
dma_resv_fini(&obj->base._resv);
|
|
}
|
|
|
|
/**
|
|
* i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
|
|
* for a given cache_level
|
|
* @obj: #drm_i915_gem_object
|
|
* @cache_level: cache level
|
|
*/
|
|
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
|
|
unsigned int cache_level)
|
|
{
|
|
struct drm_i915_private *i915 = to_i915(obj->base.dev);
|
|
|
|
obj->cache_level = cache_level;
|
|
|
|
if (cache_level != I915_CACHE_NONE)
|
|
obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
|
|
I915_BO_CACHE_COHERENT_FOR_WRITE);
|
|
else if (HAS_LLC(i915))
|
|
obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
|
|
else
|
|
obj->cache_coherent = 0;
|
|
|
|
obj->cache_dirty =
|
|
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
|
|
!IS_DGFX(i915);
|
|
}
|
|
|
|
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
|
|
{
|
|
struct drm_i915_private *i915 = to_i915(obj->base.dev);
|
|
|
|
/*
|
|
* This is purely from a security perspective, so we simply don't care
|
|
* about non-userspace objects being able to bypass the LLC.
|
|
*/
|
|
if (!(obj->flags & I915_BO_ALLOC_USER))
|
|
return false;
|
|
|
|
/*
|
|
* EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
|
|
* possible for userspace to bypass the GTT caching bits set by the
|
|
* kernel, as per the given object cache_level. This is troublesome
|
|
* since the heavy flush we apply when first gathering the pages is
|
|
* skipped if the kernel thinks the object is coherent with the GPU. As
|
|
* a result it might be possible to bypass the cache and read the
|
|
* contents of the page directly, which could be stale data. If it's
|
|
* just a case of userspace shooting themselves in the foot then so be
|
|
* it, but since i915 takes the stance of always zeroing memory before
|
|
* handing it to userspace, we need to prevent this.
|
|
*/
|
|
return IS_JSL_EHL(i915);
|
|
}
|
|
|
|
/*
 * GEM handle-close hook: sever every LUT entry this file holds on the
 * object, revoke the file's mmap offsets, and close the vmas that were
 * instantiated through those handles.
 */
static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	/* Collect this file's LUT entries onto a private list under the lock. */
	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			/* Hold the ctx so it survives until the second loop. */
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

		/* Break long locks, and carefully continue on from this spot */
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	/* Revoke this file's access to all mmap offsets on the object. */
	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */

		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		/* Drop the ctx ref taken above, the lut, and the lut's obj ref. */
		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}
|
|
|
|
void __i915_gem_free_object_rcu(struct rcu_head *head)
|
|
{
|
|
struct drm_i915_gem_object *obj =
|
|
container_of(head, typeof(*obj), rcu);
|
|
struct drm_i915_private *i915 = to_i915(obj->base.dev);
|
|
|
|
i915_gem_object_free(obj);
|
|
|
|
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
|
|
atomic_dec(&i915->mm.free_count);
|
|
}
|
|
|
|
static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
|
|
{
|
|
/* Skip serialisation and waking the device if known to be not used. */
|
|
|
|
if (obj->userfault_count)
|
|
i915_gem_object_release_mmap_gtt(obj);
|
|
|
|
if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
|
|
struct i915_mmap_offset *mmo, *mn;
|
|
|
|
i915_gem_object_release_mmap_offset(obj);
|
|
|
|
rbtree_postorder_for_each_entry_safe(mmo, mn,
|
|
&obj->mmo.offsets,
|
|
offset) {
|
|
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
|
|
&mmo->vma_node);
|
|
kfree(mmo);
|
|
}
|
|
obj->mmo.offsets = RB_ROOT;
|
|
}
|
|
}
|
|
|
|
/**
 * __i915_gem_object_pages_fini - Clean up pages use of a gem object
 * @obj: The gem object to clean up
 *
 * This function cleans up usage of the object mm.pages member. It
 * is intended for backends that need to clean up a gem object in
 * separate steps and needs to be called when the object is idle before
 * the object's backing memory is freed.
 */
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (!list_empty(&obj->vma.list)) {
		struct i915_vma *vma;

		/*
		 * Destroy all remaining vmas. i915_vma_destroy() may sleep,
		 * so drop the spinlock around each call and restart the walk
		 * from the list head.
		 * NOTE(review): relies on i915_vma_destroy() unlinking the
		 * vma from obj->vma.list, otherwise this loop would not
		 * terminate — confirm against i915_vma.c.
		 */
		spin_lock(&obj->vma.lock);
		while ((vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
			GEM_BUG_ON(vma->obj != obj);
			spin_unlock(&obj->vma.lock);

			i915_vma_destroy(vma);

			spin_lock(&obj->vma.lock);
		}
		spin_unlock(&obj->vma.lock);
	}

	__i915_gem_object_free_mmaps(obj);

	/* Force the pin count to zero so the pages can actually be dropped. */
	atomic_set(&obj->mm.pages_pin_count, 0);
	__i915_gem_object_put_pages(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));
}
|
|
|
|
/*
 * Release everything the object owns except its backing pages (handled
 * earlier by __i915_gem_object_pages_fini()) and the struct itself,
 * which the caller frees (typically via RCU).
 */
void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
	trace_i915_gem_object_destroy(obj);

	/* All per-file handles must already have been closed. */
	GEM_BUG_ON(!list_empty(&obj->lut_list));

	bitmap_free(obj->bit_17);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_free_mmap_offset(&obj->base);

	if (obj->ops->release)
		obj->ops->release(obj);

	/*
	 * NOTE(review): presumably a single placement is not a separate
	 * allocation, hence the > 1 guard — confirm against object creation.
	 */
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);

	if (obj->shares_resv_from)
		i915_vm_resv_put(obj->shares_resv_from);

	__i915_gem_object_fini(obj);
}
|
|
|
|
/*
 * Free a batch of objects pulled off the deferred free_list. Objects whose
 * lock cannot be taken right now are pushed back and retried later.
 */
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		might_sleep();
		/* Backends with a delayed_free hook run their own teardown. */
		if (obj->ops->delayed_free) {
			obj->ops->delayed_free(obj);
			continue;
		}

		if (!i915_gem_object_trylock(obj, NULL)) {
			/* busy, toss it back to the pile */
			if (llist_add(&obj->freed, &i915->mm.free_list))
				queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
			continue;
		}

		__i915_gem_object_pages_fini(obj);
		i915_gem_object_unlock(obj);
		__i915_gem_free_object(obj);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}
|
|
|
|
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
|
|
{
|
|
struct llist_node *freed = llist_del_all(&i915->mm.free_list);
|
|
|
|
if (unlikely(freed))
|
|
__i915_gem_free_objects(i915, freed);
|
|
}
|
|
|
|
static void __i915_gem_free_work(struct work_struct *work)
|
|
{
|
|
struct drm_i915_private *i915 =
|
|
container_of(work, struct drm_i915_private, mm.free_work.work);
|
|
|
|
i915_gem_flush_free_objects(i915);
|
|
}
|
|
|
|
/*
 * drm_gem_object_funcs.free hook: queue the object for deferred freeing
 * on the driver's free_list rather than tearing it down inline.
 */
static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we can not do that directly from the RCU callback (which may
	 * be a softirq context), but must instead then defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
	 */

	/* llist_add returns true only for the first entry; kick the worker then. */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
}
|
|
|
|
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
|
|
enum fb_op_origin origin)
|
|
{
|
|
struct intel_frontbuffer *front;
|
|
|
|
front = __intel_frontbuffer_get(obj);
|
|
if (front) {
|
|
intel_frontbuffer_flush(front, origin);
|
|
intel_frontbuffer_put(front);
|
|
}
|
|
}
|
|
|
|
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
|
|
enum fb_op_origin origin)
|
|
{
|
|
struct intel_frontbuffer *front;
|
|
|
|
front = __intel_frontbuffer_get(obj);
|
|
if (front) {
|
|
intel_frontbuffer_invalidate(front, origin);
|
|
intel_frontbuffer_put(front);
|
|
}
|
|
}
|
|
|
|
static void
|
|
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
|
|
{
|
|
void *src_map;
|
|
void *src_ptr;
|
|
|
|
src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
|
|
|
|
src_ptr = src_map + offset_in_page(offset);
|
|
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
|
|
drm_clflush_virt_range(src_ptr, size);
|
|
memcpy(dst, src_ptr, size);
|
|
|
|
kunmap_atomic(src_map);
|
|
}
|
|
|
|
/* Read @size bytes at @offset from an iomem-backed object via a WC mapping. */
static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void __iomem *src_map;
	void __iomem *src_ptr;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);

	/* Map one page WC; the io_mapping offset is relative to region start. */
	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	/* Try the accelerated WC-read helper first, else plain memcpy_fromio. */
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}
|
|
|
|
/**
|
|
* i915_gem_object_read_from_page - read data from the page of a GEM object
|
|
* @obj: GEM object to read from
|
|
* @offset: offset within the object
|
|
* @dst: buffer to store the read data
|
|
* @size: size to read
|
|
*
|
|
* Reads data from @obj at the specified offset. The requested region to read
|
|
* from can't cross a page boundary. The caller must ensure that @obj pages
|
|
* are pinned and that @obj is synced wrt. any related writes.
|
|
*
|
|
* Return: %0 on success or -ENODEV if the type of @obj's backing store is
|
|
* unsupported.
|
|
*/
|
|
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
|
|
{
|
|
GEM_BUG_ON(offset >= obj->base.size);
|
|
GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
|
|
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
|
|
|
|
if (i915_gem_object_has_struct_page(obj))
|
|
i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
|
|
else if (i915_gem_object_has_iomem(obj))
|
|
i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
|
|
else
|
|
return -ENODEV;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
 * i915_gem_object_evictable - Whether object is likely evictable after unbind.
 * @obj: The object to check
 *
 * This function checks whether the object is likely unvictable after unbind.
 * If the object is not locked when checking, the result is only advisory.
 * If the object is locked when checking, and the function returns true,
 * then an eviction should indeed be possible. But since unlocked vma
 * unpinning and unbinding is currently possible, the object can actually
 * become evictable even if this function returns false.
 *
 * Return: true if the object may be evictable. False otherwise.
 */
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = atomic_read(&obj->mm.pages_pin_count);

	if (!pin_count)
		return true;

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		/* Any explicitly pinned vma blocks eviction outright. */
		if (i915_vma_is_pinned(vma)) {
			spin_unlock(&obj->vma.lock);
			return false;
		}
		/*
		 * NOTE(review): assumes each vma holding pages accounts for
		 * exactly one pages_pin_count reference — confirm in i915_vma.c.
		 */
		if (atomic_read(&vma->pages_count))
			pin_count--;
	}
	spin_unlock(&obj->vma.lock);
	GEM_WARN_ON(pin_count < 0);

	/* Evictable iff every remaining pin is explained by a vma binding. */
	return pin_count == 0;
}
|
|
|
|
/**
|
|
* i915_gem_object_migratable - Whether the object is migratable out of the
|
|
* current region.
|
|
* @obj: Pointer to the object.
|
|
*
|
|
* Return: Whether the object is allowed to be resident in other
|
|
* regions than the current while pages are present.
|
|
*/
|
|
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
|
|
{
|
|
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
|
|
|
|
if (!mr)
|
|
return false;
|
|
|
|
return obj->mm.n_placements > 1;
|
|
}
|
|
|
|
/**
 * i915_gem_object_has_struct_page - Whether the object is page-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the page backing may change under the caller.
 *
 * Return: True if page-backed, false otherwise.
 */
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	/* On discrete, an unpinned evictable object must hold the lock. */
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}
|
|
|
|
/**
 * i915_gem_object_has_iomem - Whether the object is iomem-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the iomem backing may change under the caller.
 *
 * Return: True if iomem-backed, false otherwise.
 */
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	/* On discrete, an unpinned evictable object must hold the lock. */
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_IOMEM;
}
|
|
|
|
/**
 * i915_gem_object_can_migrate - Whether an object likely can be migrated
 *
 * @obj: The object to migrate
 * @id: The region intended to migrate to
 *
 * Check whether the object backend supports migration to the
 * given region. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate objects and might be slightly less permissive
 * than i915_gem_object_migrate() when it comes to objects with the
 * I915_BO_ALLOC_USER flag set.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int num_allowed = obj->mm.n_placements;
	struct intel_memory_region *mr;
	unsigned int i;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);

	mr = i915->mm.regions[id];
	if (!mr)
		return false;

	/* Already resident in the target region: trivially "migratable". */
	if (obj->mm.region == mr)
		return true;

	if (!i915_gem_object_evictable(obj))
		return false;

	if (!obj->ops->migrate)
		return false;

	/* Kernel-internal objects are not restricted to a placement list. */
	if (!(obj->flags & I915_BO_ALLOC_USER))
		return true;

	if (num_allowed == 0)
		return false;

	/* Userspace objects may only move within their declared placements. */
	for (i = 0; i < num_allowed; ++i) {
		if (mr == obj->mm.placements[i])
			return true;
	}

	return false;
}
|
|
|
|
/**
 * i915_gem_object_migrate - Migrate an object to the desired region id
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned, (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mr;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
	assert_object_held(obj);

	mr = i915->mm.regions[id];
	GEM_BUG_ON(!mr);

	if (!i915_gem_object_can_migrate(obj, id))
		return -EINVAL;

	/* No migrate hook: success only if already in the target region. */
	if (!obj->ops->migrate) {
		if (GEM_WARN_ON(obj->mm.region != mr))
			return -EINVAL;
		return 0;
	}

	return obj->ops->migrate(obj, mr);
}
|
|
|
|
/**
 * i915_gem_object_placement_possible - Check whether the object can be
 * placed at certain memory type
 * @obj: Pointer to the object
 * @type: The memory type to check
 *
 * Return: True if the object can be placed in @type. False otherwise.
 */
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type)
{
	unsigned int i;

	/* No explicit placement list: infer from the current backing store. */
	if (!obj->mm.n_placements) {
		switch (type) {
		case INTEL_MEMORY_LOCAL:
			return i915_gem_object_has_iomem(obj);
		case INTEL_MEMORY_SYSTEM:
			return i915_gem_object_has_pages(obj);
		default:
			/* Ignore stolen for now */
			GEM_BUG_ON(1);
			return false;
		}
	}

	for (i = 0; i < obj->mm.n_placements; i++) {
		if (obj->mm.placements[i]->type == type)
			return true;
	}

	return false;
}
|
|
|
|
/* Per-device init: set up the deferred object-free worker. */
void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
|
|
|
|
/* Module unload: destroy the object slab cache. */
void i915_objects_module_exit(void)
{
	kmem_cache_destroy(slab_objects);
}
|
|
|
|
/* Module load: create the slab cache backing all i915 GEM objects. */
int __init i915_objects_module_init(void)
{
	slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);

	return slab_objects ? 0 : -ENOMEM;
}
|
|
|
|
/* GEM vfuncs installed on every i915 object by i915_gem_object_alloc(). */
static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
};
|
|
|
|
/**
|
|
* i915_gem_object_get_moving_fence - Get the object's moving fence if any
|
|
* @obj: The object whose moving fence to get.
|
|
*
|
|
* A non-signaled moving fence means that there is an async operation
|
|
* pending on the object that needs to be waited on before setting up
|
|
* any GPU- or CPU PTEs to the object's pages.
|
|
*
|
|
* Return: A refcounted pointer to the object's moving fence if any,
|
|
* NULL otherwise.
|
|
*/
|
|
struct dma_fence *
|
|
i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
|
|
{
|
|
return dma_fence_get(i915_gem_to_ttm(obj)->moving);
|
|
}
|
|
|
|
void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
|
|
struct dma_fence *fence)
|
|
{
|
|
struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
|
|
|
|
if (*moving == fence)
|
|
return;
|
|
|
|
dma_fence_put(*moving);
|
|
*moving = dma_fence_get(fence);
|
|
}
|
|
|
|
/**
 * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
 * @obj: The object whose moving fence to wait for.
 * @intr: Whether to wait interruptible.
 *
 * If the moving fence signaled without an error, it is detached from the
 * object and put.
 *
 * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
 * negative error code if the async operation represented by the
 * moving fence failed.
 */
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr)
{
	struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
	int ret;

	assert_object_held(obj);
	if (!fence)
		return 0;

	ret = dma_fence_wait(fence, intr);
	if (ret)
		return ret;

	/* A failed fence is left attached so the error keeps propagating. */
	if (fence->error)
		return fence->error;

	/* Signaled cleanly: detach and drop the object's fence reference. */
	i915_gem_to_ttm(obj)->moving = NULL;
	dma_fence_put(fence);
	return 0;
}
|
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
|
#include "selftests/huge_gem_object.c"
|
|
#include "selftests/huge_pages.c"
|
|
#include "selftests/i915_gem_migrate.c"
|
|
#include "selftests/i915_gem_object.c"
|
|
#include "selftests/i915_gem_coherency.c"
|
|
#endif
|