iommu: Add IOMMU SVA domain support
The SVA iommu_domain represents a hardware page table that the IOMMU hardware can use for SVA translation. This adds the infrastructure to support SVA domains in the iommu core. It includes:

- Extend the iommu_domain to support a new IOMMU_DOMAIN_SVA domain type. IOMMU drivers that support allocation of SVA domains should provide their own SVA-domain-specific iommu_domain_ops.
- Add a helper to allocate an SVA domain. iommu_domain_free() is still used to free an SVA domain.

report_iommu_fault() should be replaced by the new iommu_report_device_fault(). The existing fault handler is left in place for its existing users, and the newly added SVA members exclude it.

Suggested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Tested-by: Tony Zhu <tony.zhu@intel.com>
Link: https://lore.kernel.org/r/20221031005917.45690-7-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 1660370455
commit 136467962e
2 changed files with 43 additions and 2 deletions
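For context, a minimal usage sketch of the new interface follows; it is not part of this patch. A caller would allocate an SVA domain for an mm_struct and install it on a device for a PASID. The wrapper name sva_bind_example() and its error handling are illustrative assumptions; only iommu_sva_domain_alloc(), iommu_attach_device_pasid() and iommu_domain_free() come from this series.

#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical helper, for illustration only. */
static struct iommu_domain *sva_bind_example(struct device *dev,
					     struct mm_struct *mm,
					     ioasid_t pasid)
{
	struct iommu_domain *domain;
	int ret;

	/* Allocate an SVA domain; this grabs a reference on @mm. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	/* Install the shared address space on @dev for the given PASID. */
	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret) {
		/* iommu_domain_free() drops the mm reference for SVA domains. */
		iommu_domain_free(domain);
		return ERR_PTR(ret);
	}

	return domain;
}

On teardown, the caller would detach with iommu_detach_device_pasid() before freeing the domain.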
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/cc_platform.h>
 #include <trace/events/iommu.h>
+#include <linux/sched/mm.h>
 
 #include "dma-iommu.h"
 
@@ -1934,6 +1935,8 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 
 void iommu_domain_free(struct iommu_domain *domain)
 {
+	if (domain->type == IOMMU_DOMAIN_SVA)
+		mmdrop(domain->mm);
 	iommu_put_dma_cookie(domain);
 	domain->ops->free(domain);
 }
@@ -3383,3 +3386,20 @@ struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
 	return domain;
 }
 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
+
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+					    struct mm_struct *mm)
+{
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
+	struct iommu_domain *domain;
+
+	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+	if (!domain)
+		return NULL;
+
+	domain->type = IOMMU_DOMAIN_SVA;
+	mmgrab(mm);
+	domain->mm = mm;
+
+	return domain;
+}
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -64,6 +64,8 @@ struct iommu_domain_geometry {
 #define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
 #define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */
 
+#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
+
 /*
  * This are the possible domain-types
  *
@@ -77,6 +79,8 @@ struct iommu_domain_geometry {
  *				  certain optimizations for these domains
  *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
  *				  invalidation.
+ *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
+ *				  represented by mm_struct's.
  */
 #define IOMMU_DOMAIN_BLOCKED	(0U)
 #define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
@@ -86,15 +90,24 @@ struct iommu_domain_geometry {
 #define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
 				 __IOMMU_DOMAIN_DMA_API |	\
 				 __IOMMU_DOMAIN_DMA_FQ)
+#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
 
 struct iommu_domain {
 	unsigned type;
 	const struct iommu_domain_ops *ops;
 	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
-	iommu_fault_handler_t handler;
-	void *handler_token;
 	struct iommu_domain_geometry geometry;
 	struct iommu_dma_cookie *iova_cookie;
+	union {
+		struct {
+			iommu_fault_handler_t handler;
+			void *handler_token;
+		};
+		struct {	/* IOMMU_DOMAIN_SVA */
+			struct mm_struct *mm;
+			int users;
+		};
+	};
 };
 
 static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
@@ -685,6 +698,8 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
 void iommu_group_release_dma_owner(struct iommu_group *group);
 bool iommu_group_dma_owner_claimed(struct iommu_group *group);
 
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+					    struct mm_struct *mm);
 int iommu_attach_device_pasid(struct iommu_domain *domain,
 			      struct device *dev, ioasid_t pasid);
 void iommu_detach_device_pasid(struct iommu_domain *domain,
@@ -1055,6 +1070,12 @@ static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
 	return false;
 }
 
+static inline struct iommu_domain *
+iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
+{
+	return NULL;
+}
+
 static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
 					    struct device *dev, ioasid_t pasid)
 {