drm/msm/adreno: stall translation on fault for all GPU families
Commit e25e92e08e ("drm/msm: devcoredump iommu fault support") enabled
SMMU stalling to collect GPU state, but only for a6xx. It tied enabling
the stall to the creation of per-instance pagetables.

Since that commit, SoCs with a5xx GPUs have also gained support for
adreno-smmu-priv. Move stalling into generic code and add the
corresponding resume_translation calls.
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Patchwork: https://patchwork.freedesktop.org/patch/522720/
Link: https://lore.kernel.org/r/20230214123504.3729522-2-dmitry.baryshkov@linaro.org
Signed-off-by: Rob Clark <robdclark@chromium.org>
commit 8cceb773f5
parent 52ff0d3073
4 changed files with 31 additions and 12 deletions
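For background (not part of the patch): below is a minimal, self-contained C
sketch of the stall-on-fault flow that this change generalizes to all GPU
families. The names used here (fake_smmu, set_stall, resume_translation,
capture_state, fault_handler) are simplified stand-ins for adreno_smmu_priv
and the msm_mmu callbacks, so treat this as an illustration of the control
flow only, not the actual driver code.

/*
 * Sketch only: the SMMU is told to stall faulting transactions up front,
 * the fault handler snapshots GPU state while everything is frozen, and
 * translation is then resumed so the GPU does not hang behind the
 * stalled transaction.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_smmu {
	bool stall_on_fault;	/* configured once at MMU creation */
	bool stalled;		/* set by "hardware" when a fault stalls */
};

static void set_stall(struct fake_smmu *smmu, bool enable)
{
	smmu->stall_on_fault = enable;
}

static void resume_translation(struct fake_smmu *smmu)
{
	smmu->stalled = false;	/* let the faulting transaction complete */
}

static void capture_state(unsigned long iova, int flags)
{
	/* stand-in for devcoredump state capture */
	printf("fault: iova=%#lx flags=%d\n", iova, flags);
}

/* What a generic fault handler does once stalling is enabled for all GPUs. */
static int fault_handler(struct fake_smmu *smmu, unsigned long iova, int flags)
{
	smmu->stalled = smmu->stall_on_fault;
	capture_state(iova, flags);	/* GPU state is stable while stalled */
	resume_translation(smmu);	/* always resume, even without a GPU handler */
	return 0;
}

int main(void)
{
	struct fake_smmu smmu = { 0 };

	set_stall(&smmu, true);		/* done when the GPU MMU is created */
	fault_handler(&smmu, 0x1000, 0);
	return 0;
}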
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1103,6 +1103,8 @@ static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
 			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
 			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
 
+	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+
 	return 0;
 }
 
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -208,7 +208,7 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
 	struct msm_gem_address_space *aspace;
 	u64 start, size;
 
-	mmu = msm_iommu_new(&pdev->dev, quirks);
+	mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
 	if (IS_ERR_OR_NULL(mmu))
 		return ERR_CAST(mmu);
 
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -237,13 +237,6 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 	if (!ttbr1_cfg)
 		return ERR_PTR(-ENODEV);
 
-	/*
-	 * Defer setting the fault handler until we have a valid adreno_smmu
-	 * to avoid accidentially installing a GPU specific fault handler for
-	 * the display's iommu
-	 */
-	iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
-
 	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
 	if (!pagetable)
 		return ERR_PTR(-ENOMEM);
@@ -271,9 +264,6 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 	 * the arm-smmu driver as a trigger to set up TTBR0
 	 */
 	if (atomic_inc_return(&iommu->pagetables) == 1) {
-		/* Enable stall on iommu fault: */
-		adreno_smmu->set_stall(adreno_smmu->cookie, true);
-
 		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
 		if (ret) {
 			free_io_pgtable_ops(pagetable->pgtbl_ops);
@@ -302,6 +292,7 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
 		unsigned long iova, int flags, void *arg)
 {
 	struct msm_iommu *iommu = arg;
+	struct msm_mmu *mmu = &iommu->base;
 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
 	struct adreno_smmu_fault_info info, *ptr = NULL;
 
@@ -314,6 +305,10 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
 		return iommu->base.handler(iommu->base.arg, iova, flags, ptr);
 
 	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
+
+	if (mmu->funcs->resume_translation)
+		mmu->funcs->resume_translation(mmu);
+
 	return 0;
 }
 
@@ -321,6 +316,7 @@ static void msm_iommu_resume_translation(struct msm_mmu *mmu)
 {
 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
 
-	adreno_smmu->resume_translation(adreno_smmu->cookie, true);
+	if (adreno_smmu->resume_translation)
+		adreno_smmu->resume_translation(adreno_smmu->cookie, true);
 }
 
@@ -406,3 +402,23 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
 
 	return &iommu->base;
 }
+
+struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
+{
+	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
+	struct msm_iommu *iommu;
+	struct msm_mmu *mmu;
+
+	mmu = msm_iommu_new(dev, quirks);
+	if (IS_ERR(mmu))
+		return mmu;
+
+	iommu = to_msm_iommu(mmu);
+	iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+
+	/* Enable stall on iommu fault: */
+	if (adreno_smmu->set_stall)
+		adreno_smmu->set_stall(adreno_smmu->cookie, true);
+
+	return mmu;
+}
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -41,6 +41,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 }
 
 struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
+struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
 struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 
 static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,