Currently the KMD is using enum i915_cache_level to set caching policy for buffer objects. This is flaky because the PAT index which really controls the caching behavior in PTE has far more levels than what's defined in the enum. In addition, the PAT index is platform dependent, having to translate between i915_cache_level and PAT index is not reliable, and makes the code more complicated. From UMD's perspective there is also a necessity to set caching policy for performance fine tuning. It's much easier for the UMD to directly use PAT index because the behavior of each PAT index is clearly defined in Bspec. Having the abstracted i915_cache_level sitting in between would only cause more ambiguity. PAT is expected to work much like MOCS already works today, and by design userspace is expected to select the index that exactly matches the desired behavior described in the hardware specification. For these reasons this patch replaces i915_cache_level with PAT index. Also note, the cache_level is not completely removed yet, because the KMD still has the need of creating buffer objects with simple cache settings such as cached, uncached, or writethrough. For kernel objects, cache_level is used for simplicity and backward compatibility. For pre-gen12 platforms PAT can have a 1:1 mapping to i915_cache_level, so these two are interchangeable; see the use of LEGACY_CACHELEVEL. One consequence of this change is that gen8_pte_encode no longer works for gen12 platforms, due to the fact that gen12 platforms have different PAT definitions. In the meantime, the mtl_pte_encode function introduced specifically for MTL becomes generic for all gen12 platforms. This patch renames the MTL PTE encode function to gen12_pte_encode and applies it to all gen12 platforms. Even though this change looks unrelated, separating it would temporarily break gen12 PTE encoding, so the two changes are squashed into one patch.
Special note: this patch changes the way caching behavior is controlled, in the sense that some objects are left to be managed by userspace. For such objects we need to be careful not to change the userspace settings. There are kerneldoc and comments added around obj->cache_coherent, cache_dirty, and how to bypass the checks via i915_gem_object_has_cache_level. For full understanding, these changes need to be looked at together with the two follow-up patches: one disables the {set|get}_caching ioctl's and the other adds a set_pat extension to the GEM_CREATE uAPI. Bspec: 63019 Cc: Chris Wilson <chris.p.wilson@linux.intel.com> Signed-off-by: Fei Yang <fei.yang@intel.com> Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com> Reviewed-by: Matt Roper <matthew.d.roper@intel.com> Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20230509165200.1740-3-fei.yang@intel.com
136 lines
3.8 KiB
C
136 lines
3.8 KiB
C
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
|
|
|
|
#include "mock_gtt.h"
|
|
|
|
static void mock_insert_page(struct i915_address_space *vm,
|
|
dma_addr_t addr,
|
|
u64 offset,
|
|
unsigned int pat_index,
|
|
u32 flags)
|
|
{
|
|
}
|
|
|
|
/* Mock ->insert_entries() hook: deliberately does nothing. */
static void mock_insert_entries(struct i915_address_space *vm,
				struct i915_vma_resource *vma_res,
				unsigned int pat_index, u32 flags)
{
	/* Selftests only require the callback to exist; no PTEs to write. */
}
|
|
|
|
static void mock_bind_ppgtt(struct i915_address_space *vm,
|
|
struct i915_vm_pt_stash *stash,
|
|
struct i915_vma_resource *vma_res,
|
|
unsigned int pat_index,
|
|
u32 flags)
|
|
{
|
|
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
|
|
vma_res->bound_flags |= flags;
|
|
}
|
|
|
|
/* Mock PPGTT unbind: deliberately does nothing. */
static void mock_unbind_ppgtt(struct i915_address_space *vm,
			      struct i915_vma_resource *vma_res)
{
	/* Nothing was bound by the mock, so nothing to undo. */
}
|
|
|
|
/* Mock ->cleanup() hook: deliberately does nothing. */
static void mock_cleanup(struct i915_address_space *vm)
{
	/* The mock allocates no page-table resources to release. */
}
|
|
|
|
/* Mock ->clear_range() hook: deliberately does nothing. */
static void mock_clear_range(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	/* No PTEs exist in the mock, so there is nothing to clear. */
}
|
|
|
|
struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
|
|
{
|
|
struct i915_ppgtt *ppgtt;
|
|
|
|
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
|
|
if (!ppgtt)
|
|
return NULL;
|
|
|
|
ppgtt->vm.gt = to_gt(i915);
|
|
ppgtt->vm.i915 = i915;
|
|
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
|
|
ppgtt->vm.dma = i915->drm.dev;
|
|
|
|
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
|
|
|
|
ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
|
|
ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
|
|
|
|
ppgtt->vm.clear_range = mock_clear_range;
|
|
ppgtt->vm.insert_page = mock_insert_page;
|
|
ppgtt->vm.insert_entries = mock_insert_entries;
|
|
ppgtt->vm.cleanup = mock_cleanup;
|
|
|
|
ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
|
|
ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
|
|
|
|
return ppgtt;
|
|
}
|
|
|
|
static void mock_bind_ggtt(struct i915_address_space *vm,
|
|
struct i915_vm_pt_stash *stash,
|
|
struct i915_vma_resource *vma_res,
|
|
unsigned int pat_index,
|
|
u32 flags)
|
|
{
|
|
}
|
|
|
|
/* Mock GGTT unbind: deliberately does nothing. */
static void mock_unbind_ggtt(struct i915_address_space *vm,
			     struct i915_vma_resource *vma_res)
{
	/* Nothing was bound by the mock, so nothing to undo. */
}
|
|
|
|
void mock_init_ggtt(struct intel_gt *gt)
|
|
{
|
|
struct i915_ggtt *ggtt = gt->ggtt;
|
|
|
|
ggtt->vm.gt = gt;
|
|
ggtt->vm.i915 = gt->i915;
|
|
ggtt->vm.is_ggtt = true;
|
|
|
|
ggtt->gmadr = DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
|
|
ggtt->mappable_end = resource_size(&ggtt->gmadr);
|
|
ggtt->vm.total = 4096 * PAGE_SIZE;
|
|
|
|
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
|
|
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
|
|
|
|
ggtt->vm.clear_range = mock_clear_range;
|
|
ggtt->vm.insert_page = mock_insert_page;
|
|
ggtt->vm.insert_entries = mock_insert_entries;
|
|
ggtt->vm.cleanup = mock_cleanup;
|
|
|
|
ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
|
|
ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
|
|
|
|
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
|
|
}
|
|
|
|
void mock_fini_ggtt(struct i915_ggtt *ggtt)
|
|
{
|
|
i915_address_space_fini(&ggtt->vm);
|
|
}
|