iommu/arm-smmu: Pass arm_smmu_domain to internal functions
Keep the types consistent. All the callers of these functions already have
obtained a struct arm_smmu_domain; don't needlessly go to/from an
iommu_domain through the internal call chains.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/4-v2-c86cc8c2230e+160bb-smmu_newapi_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
parent bbbf11eea3
commit e0976331ad
1 changed file with 10 additions and 12 deletions
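For context, the conversion this commit removes from the internal call chains
is the container_of() downcast behind to_smmu_domain(): struct arm_smmu_domain
embeds a struct iommu_domain, so the two pointers convert both ways at no
runtime cost. Below is a minimal, self-contained sketch of that pattern; the
structure layouts and the local container_of() are simplified assumptions, not
the kernel's definitions:

	#include <stddef.h>

	struct iommu_domain { unsigned int type; };

	struct arm_smmu_domain {
		void *smmu;                 /* placeholder for struct arm_smmu_device * */
		struct iommu_domain domain; /* embedded core-API view of the domain */
	};

	/* Simplified stand-in for the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Downcast: core iommu_domain -> driver-private arm_smmu_domain */
	static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
	{
		return container_of(dom, struct arm_smmu_domain, domain);
	}

	int main(void)
	{
		struct arm_smmu_domain smmu_domain = { 0 };

		/* Round trip: &smmu_domain.domain upcasts, to_smmu_domain() downcasts */
		return to_smmu_domain(&smmu_domain.domain) == &smmu_domain ? 0 : 1;
	}

After this commit the internal helpers take the arm_smmu_domain directly, and
only calls back into core code (e.g. report_iommu_fault()) upcast with
&smmu_domain->domain.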
drivers/iommu/arm/arm-smmu/arm-smmu.c

@@ -409,8 +409,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
 	u32 fsr, fsynr, cbfrsynra;
 	unsigned long iova;
-	struct iommu_domain *domain = dev;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_domain *smmu_domain = dev;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	int idx = smmu_domain->cfg.cbndx;
 	int ret;
@@ -423,7 +422,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
 	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
 
-	ret = report_iommu_fault(domain, NULL, iova,
+	ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
 		fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
 
 	if (ret == -ENOSYS)
@@ -624,7 +623,7 @@ static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
 	return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
 }
 
-static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
 					struct arm_smmu_device *smmu,
 					struct device *dev)
 {
@@ -633,7 +632,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	struct io_pgtable_ops *pgtbl_ops;
 	struct io_pgtable_cfg pgtbl_cfg;
 	enum io_pgtable_fmt fmt;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct iommu_domain *domain = &smmu_domain->domain;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	irqreturn_t (*context_fault)(int irq, void *dev);
 
@@ -807,8 +806,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	else
 		context_fault = arm_smmu_context_fault;
 
-	ret = devm_request_irq(smmu->dev, irq, context_fault,
-			       IRQF_SHARED, "arm-smmu-context-fault", domain);
+	ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
+			       "arm-smmu-context-fault", smmu_domain);
 	if (ret < 0) {
 		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
 			cfg->irptndx, irq);
@@ -829,9 +828,8 @@ out_unlock:
 	return ret;
 }
 
-static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
 {
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	int ret, irq;
@@ -852,7 +850,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 
 	if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
 		irq = smmu->irqs[cfg->irptndx];
-		devm_free_irq(smmu->dev, irq, domain);
+		devm_free_irq(smmu->dev, irq, smmu_domain);
 	}
 
 	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
@@ -892,7 +890,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 	 * Free the domain resources. We assume that all devices have
 	 * already been detached.
 	 */
-	arm_smmu_destroy_domain_context(domain);
+	arm_smmu_destroy_domain_context(smmu_domain);
 	kfree(smmu_domain);
 }
 
@@ -1142,7 +1140,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		return ret;
 
 	/* Ensure that the domain is finalised */
-	ret = arm_smmu_init_domain_context(domain, smmu, dev);
+	ret = arm_smmu_init_domain_context(smmu_domain, smmu, dev);
 	if (ret < 0)
 		goto rpm_put;
 
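One subtlety the IRQ hunks preserve: the dev cookie passed to
devm_request_irq() is both what the handler receives as its void *dev argument
and the key devm_free_irq() uses to locate the action (which matters with
IRQF_SHARED), so the handler, the request site, and the free site all switch
to smmu_domain together. A small userspace mock of that contract, where
mock_request_irq()/mock_free_irq() are illustrative stand-ins and not kernel
APIs:

	#include <assert.h>
	#include <stdio.h>

	struct arm_smmu_domain { int cbndx; };

	static void *registered_cookie;

	/* Stand-in for devm_request_irq(): records the dev cookie */
	static int mock_request_irq(void *cookie)
	{
		registered_cookie = cookie;
		return 0;
	}

	/* Stand-in for devm_free_irq(): matches the action by the same cookie */
	static int mock_free_irq(void *cookie)
	{
		return cookie == registered_cookie ? 0 : -1;
	}

	int main(void)
	{
		struct arm_smmu_domain smmu_domain = { .cbndx = 3 };

		mock_request_irq(&smmu_domain);           /* register with smmu_domain */
		assert(mock_free_irq(&smmu_domain) == 0); /* free must use the same pointer */
		puts("IRQ cookie matched");
		return 0;
	}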