linux/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
Matthew Auld bfe53be268 drm/i915/ttm: handle blitter failure on DG2
If the move or clear operation somehow fails, and the memory underneath
is not cleared, like when moving to lmem, then we currently fallback to
memcpy or memset. However with small-BAR systems this fallback might no
longer be possible. For now we use the set_wedged sledgehammer if we
ever encounter such a scenario, and mark the object as borked to plug
any holes where access to the memory underneath can happen. Add some
basic selftests to exercise this.
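
As a rough sketch of that flow (not the actual patch): the helpers
can_use_cpu_fallback() and fallback_memcpy_or_memset() below are
hypothetical, and the relative ordering of the unknown_state store and
the wedge is illustrative only (see the v3 note below).

  static int handle_move_or_clear_failure(struct drm_i915_gem_object *obj)
  {
          struct drm_i915_private *i915 = to_i915(obj->base.dev);
          struct intel_gt *gt;
          unsigned int id;

          /* Hypothetical check: can the CPU still safely touch the memory? */
          if (can_use_cpu_fallback(obj))
                  return fallback_memcpy_or_memset(obj);

          /*
           * No safe fallback (e.g. small-BAR lmem that is not CPU visible):
           * mark the object so the fault and io_reserve paths refuse to
           * expose the uncleared memory, and wedge every gt as a last resort.
           */
          obj->mm.unknown_state = true;
          smp_wmb(); /* ordered against a read-side barrier, see the v2 notes */

          for_each_gt(gt, i915, id)
                  intel_gt_set_wedged(gt);

          return -EIO;
  }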

v2:
  - In the selftests make sure we grab the runtime pm around the reset.
    Also make sure we grab the reset lock before checking if the device
    is wedged, since the wedge might still be in-progress and hence the
    bit might not be set yet.
  - Don't wedge or put the object into an unknown state, if the request
    construction fails (or similar). Just returning an error and
    skipping the fallback should be safe here.
  - Make sure we wedge each gt. (Thomas)
  - Peek at the unknown_state in io_reserve, that way we don't have to
    export or hand roll the fault_wait_for_idle. (Thomas)
  - Add the missing read-side barriers for the unknown_state; a small
    read-side sketch follows this list. (Thomas)
  - Some kernel-doc fixes. (Thomas)
v3:
  - Tweak the ordering of the set_wedged, also add FIXME.
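
A minimal read-side sketch for that unknown_state point; the helper
name object_has_unknown_state() is made up here for illustration, and
how the write side publishes the flag is simplified relative to the
patch.

  static inline bool object_has_unknown_state(struct drm_i915_gem_object *obj)
  {
          /*
           * Sample unknown_state only after the ordering point that made it
           * visible, so io_reserve/fault never maps memory whose contents
           * are undefined.
           */
          smp_rmb();
          return obj->mm.unknown_state;
  }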

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Kenneth Graunke <kenneth@whitecape.org>
Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220629174350.384910-11-matthew.auld@intel.com
2022-07-01 08:30:00 +01:00


/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */
#ifndef _I915_GEM_TTM_H_
#define _I915_GEM_TTM_H_

#include <drm/ttm/ttm_placement.h>

#include "gem/i915_gem_object_types.h"

/**
 * i915_gem_to_ttm - Convert a struct drm_i915_gem_object to a
 * struct ttm_buffer_object.
 * @obj: Pointer to the gem object.
 *
 * Return: Pointer to the embedded struct ttm_buffer_object.
 */
static inline struct ttm_buffer_object *
i915_gem_to_ttm(struct drm_i915_gem_object *obj)
{
        return &obj->__do_not_access;
}

/*
 * i915 ttm gem object destructor. Internal use only.
 */
void i915_ttm_bo_destroy(struct ttm_buffer_object *bo);
/**
 * i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding
 * struct drm_i915_gem_object.
 * @bo: Pointer to the ttm buffer object.
 *
 * Return: Pointer to the embedding struct drm_i915_gem_object, or NULL
 * if the object was not an i915 ttm object.
 */
static inline struct drm_i915_gem_object *
i915_ttm_to_gem(struct ttm_buffer_object *bo)
{
        if (bo->destroy != i915_ttm_bo_destroy)
                return NULL;

        return container_of(bo, struct drm_i915_gem_object, __do_not_access);
}
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
                               struct drm_i915_gem_object *obj,
                               resource_size_t offset,
                               resource_size_t size,
                               resource_size_t page_size,
                               unsigned int flags);

/* Internal I915 TTM declarations and definitions below. */

#define I915_PL_LMEM0 TTM_PL_PRIV
#define I915_PL_SYSTEM TTM_PL_SYSTEM
#define I915_PL_STOLEN TTM_PL_VRAM
#define I915_PL_GGTT TTM_PL_TT

struct ttm_placement *i915_ttm_sys_placement(void);

void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj);

struct i915_refct_sgt *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                         struct ttm_resource *res);

void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);

int i915_ttm_purge(struct drm_i915_gem_object *obj);

/**
 * i915_ttm_gtt_binds_lmem - Should the memory be viewed as LMEM by the GTT?
 * @mem: struct ttm_resource representing the memory.
 *
 * Return: true if memory should be viewed as LMEM for GTT binding purposes,
 * false otherwise.
 */
static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
{
        return mem->mem_type != I915_PL_SYSTEM;
}

/**
 * i915_ttm_cpu_maps_iomem - Should the memory be viewed as IOMEM by the CPU?
 * @mem: struct ttm_resource representing the memory.
 *
 * Return: true if memory should be viewed as IOMEM for CPU mapping purposes.
 */
static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
{
        /* Once / if we support GGTT, this is also false for cached ttm_tts */
        return mem->mem_type != I915_PL_SYSTEM;
}

bool i915_ttm_resource_mappable(struct ttm_resource *res);

#endif
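
For illustration only, a small usage sketch of the conversion helpers
and placement defines declared above; the callback below is
hypothetical and not part of this header.

  static bool example_bo_is_lmem(struct ttm_buffer_object *bo)
  {
          struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

          /* NULL means bo->destroy was not i915_ttm_bo_destroy (e.g. a ttm ghost object). */
          if (!obj)
                  return false;

          /* The embedded TTM object round-trips back to the same bo. */
          WARN_ON(i915_gem_to_ttm(obj) != bo);

          /* The I915_PL_* defines map i915 regions onto TTM memory types. */
          return bo->resource && bo->resource->mem_type == I915_PL_LMEM0;
  }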