drm/msm: replace MSM_BO_UNCACHED with MSM_BO_WC for internal objects
msm_gem_get_vaddr() currently always maps as writecombine, so use the right flag instead of relying on broken behavior (things don't actually work if they are mapped as uncached).

Signed-off-by: Jonathan Marek <jonathan@marek.ca>
Acked-by: Jordan Crouse <jordan@cosmicpenguin.net>
Link: https://lore.kernel.org/r/20210423190833.25319-3-jonathan@marek.ca
Signed-off-by: Rob Clark <robdclark@chromium.org>
This commit is contained in:
parent
8eaf9b02ac
commit
a5fc7aa901
7 changed files with 9 additions and 9 deletions
|
@ -902,7 +902,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
|
|||
if (!a5xx_gpu->shadow_bo) {
|
||||
a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
|
||||
sizeof(u32) * gpu->nr_rings,
|
||||
MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
|
||||
MSM_BO_WC | MSM_BO_MAP_PRIV,
|
||||
gpu->aspace, &a5xx_gpu->shadow_bo,
|
||||
&a5xx_gpu->shadow_iova);
|
||||
|
||||
|
@ -1407,7 +1407,7 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
|
|||
struct a5xx_crashdumper *dumper)
|
||||
{
|
||||
dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
|
||||
SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
|
||||
SZ_1M, MSM_BO_WC, gpu->aspace,
|
||||
&dumper->bo, &dumper->iova);
|
||||
|
||||
if (!IS_ERR(dumper->ptr))
|
||||
|
|
|
@ -363,7 +363,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
|
|||
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
|
||||
|
||||
ptr = msm_gem_kernel_new_locked(drm, bosize,
|
||||
MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
|
||||
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
|
||||
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
|
||||
if (IS_ERR(ptr))
|
||||
return;
|
||||
|
|
|
@ -230,7 +230,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
|
|||
|
||||
ptr = msm_gem_kernel_new(gpu->dev,
|
||||
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
|
||||
MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
|
||||
MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
|
||||
|
||||
if (IS_ERR(ptr))
|
||||
return PTR_ERR(ptr);
|
||||
|
@ -238,7 +238,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
|
|||
/* The buffer to store counters needs to be unprivileged */
|
||||
counters = msm_gem_kernel_new(gpu->dev,
|
||||
A5XX_PREEMPT_COUNTER_SIZE,
|
||||
MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova);
|
||||
MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
|
||||
if (IS_ERR(counters)) {
|
||||
msm_gem_kernel_put(bo, gpu->aspace, true);
|
||||
return PTR_ERR(counters);
|
||||
|
|
|
@ -852,7 +852,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
|
|||
if (!a6xx_gpu->shadow_bo) {
|
||||
a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
|
||||
sizeof(u32) * gpu->nr_rings,
|
||||
MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
|
||||
MSM_BO_WC | MSM_BO_MAP_PRIV,
|
||||
gpu->aspace, &a6xx_gpu->shadow_bo,
|
||||
&a6xx_gpu->shadow_iova);
|
||||
|
||||
|
|
|
@ -113,7 +113,7 @@ static int a6xx_crashdumper_init(struct msm_gpu *gpu,
|
|||
struct a6xx_crashdumper *dumper)
|
||||
{
|
||||
dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
|
||||
SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
|
||||
SZ_1M, MSM_BO_WC, gpu->aspace,
|
||||
&dumper->bo, &dumper->iova);
|
||||
|
||||
if (!IS_ERR(dumper->ptr))
|
||||
|
|
|
@ -391,7 +391,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
|
|||
void *ptr;
|
||||
|
||||
ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
|
||||
MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
|
||||
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
|
||||
|
||||
if (IS_ERR(ptr))
|
||||
return ERR_CAST(ptr);
|
||||
|
|
|
@ -1093,7 +1093,7 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
|
|||
uint64_t iova;
|
||||
u8 *data;
|
||||
|
||||
data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
|
||||
data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
|
||||
priv->kms->aspace,
|
||||
&msm_host->tx_gem_obj, &iova);
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue