iommu/vt-d: Remove unnecessary SVA data accesses in page fault path
The existing I/O page fault handling code accesses the per-PASID SVA data structures. This is unnecessary and ties the fault handling code to SVA scenarios. Remove the SVA data accesses from the I/O page fault reporting and responding code so that the fault handling path can become generic.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20220914011821.400986-1-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
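For orientation, here is a minimal sketch of what the reworked reporting path in prq_event_thread() boils down to, condensed from the hunks below: the faulting device is now resolved directly from the requester ID carried in the page request descriptor, rather than through the per-PASID intel_svm/intel_svm_dev structures. report_one_prq() is a hypothetical wrapper used only for illustration; the PRQ ring walk, descriptor validation and response generation are not shown.

/*
 * Sketch only, not the full handler. Types and helpers (struct intel_iommu,
 * struct page_req_dsc, intel_svm_prq_report(), handle_bad_prq_event()) are
 * the Intel IOMMU driver's own, from the code touched below.
 */
static void report_one_prq(struct intel_iommu *iommu, struct page_req_dsc *req)
{
	struct pci_dev *pdev;

	/* Resolve the faulting device from the RID alone; no SVA lookup. */
	pdev = pci_get_domain_bus_and_slot(iommu->segment,
					   PCI_BUS_NUM(req->rid),
					   req->rid & 0xff);

	/*
	 * Report the fault upward; answer INVALID if the device is unknown
	 * or the report cannot be delivered.
	 */
	if (!pdev || intel_svm_prq_report(iommu, &pdev->dev, req))
		handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
}

For the same reason, the per-device prq_seq_number consumed by the trace event moves into struct intel_iommu, so the tracing no longer depends on SVA bookkeeping either.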
parent f76349cf41
commit 06f4b8d09d
2 changed files with 8 additions and 54 deletions
@@ -586,6 +586,7 @@ struct intel_iommu {
 #ifdef CONFIG_INTEL_IOMMU_SVM
 	struct page_req_dsc *prq;
 	unsigned char prq_name[16];	/* Name for PRQ interrupt */
+	unsigned long prq_seq_number;
 	struct completion prq_complete;
 	struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
 #endif
@@ -761,7 +762,6 @@ struct intel_svm_dev {
 	struct device *dev;
 	struct intel_iommu *iommu;
 	struct iommu_sva sva;
-	unsigned long prq_seq_number;
 	u32 pasid;
 	int users;
 	u16 did;
@@ -48,23 +48,6 @@ static void *pasid_private_find(ioasid_t pasid)
 	return xa_load(&pasid_private_array, pasid);
 }
 
-static struct intel_svm_dev *
-svm_lookup_device_by_sid(struct intel_svm *svm, u16 sid)
-{
-	struct intel_svm_dev *sdev = NULL, *t;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(t, &svm->devs, list) {
-		if (t->sid == sid) {
-			sdev = t;
-			break;
-		}
-	}
-	rcu_read_unlock();
-
-	return sdev;
-}
-
 static struct intel_svm_dev *
 svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
 {
@@ -706,11 +689,10 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
 
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
-	struct intel_svm_dev *sdev = NULL;
 	struct intel_iommu *iommu = d;
-	struct intel_svm *svm = NULL;
 	struct page_req_dsc *req;
 	int head, tail, handled;
+	struct pci_dev *pdev;
 	u64 address;
 
 	/*
@@ -730,8 +712,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 			pr_err("IOMMU: %s: Page request without PASID\n",
 			       iommu->name);
 bad_req:
-			svm = NULL;
-			sdev = NULL;
 			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
 			goto prq_advance;
 		}
@@ -758,34 +738,19 @@ bad_req:
 		if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
 			goto prq_advance;
 
-		if (!svm || svm->pasid != req->pasid) {
-			/*
-			 * It can't go away, because the driver is not permitted
-			 * to unbind the mm while any page faults are outstanding.
-			 */
-			svm = pasid_private_find(req->pasid);
-			if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
-				goto bad_req;
-		}
-
-		if (!sdev || sdev->sid != req->rid) {
-			sdev = svm_lookup_device_by_sid(svm, req->rid);
-			if (!sdev)
-				goto bad_req;
-		}
-
-		sdev->prq_seq_number++;
-
+		pdev = pci_get_domain_bus_and_slot(iommu->segment,
+						   PCI_BUS_NUM(req->rid),
+						   req->rid & 0xff);
 		/*
 		 * If prq is to be handled outside iommu driver via receiver of
 		 * the fault notifiers, we skip the page response here.
 		 */
-		if (intel_svm_prq_report(iommu, sdev->dev, req))
+		if (!pdev || intel_svm_prq_report(iommu, &pdev->dev, req))
 			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
 
-		trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1,
+		trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
 				 req->priv_data[0], req->priv_data[1],
-				 sdev->prq_seq_number);
+				 iommu->prq_seq_number++);
 prq_advance:
 		head = (head + sizeof(*req)) & PRQ_RING_MASK;
 	}
@@ -881,8 +846,6 @@ int intel_svm_page_response(struct device *dev,
 			    struct iommu_page_response *msg)
 {
 	struct iommu_fault_page_request *prm;
-	struct intel_svm_dev *sdev = NULL;
-	struct intel_svm *svm = NULL;
 	struct intel_iommu *iommu;
 	bool private_present;
 	bool pasid_present;
@@ -901,8 +864,6 @@ int intel_svm_page_response(struct device *dev,
 	if (!msg || !evt)
 		return -EINVAL;
 
-	mutex_lock(&pasid_mutex);
-
 	prm = &evt->fault.prm;
 	sid = PCI_DEVID(bus, devfn);
 	pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
@@ -919,12 +880,6 @@ int intel_svm_page_response(struct device *dev,
 		goto out;
 	}
 
-	ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
-	if (ret || !sdev) {
-		ret = -ENODEV;
-		goto out;
-	}
-
 	/*
 	 * Per VT-d spec. v3.0 ch7.7, system software must respond
 	 * with page group response if private data is present (PDP)
@@ -954,6 +909,5 @@ int intel_svm_page_response(struct device *dev,
 		qi_submit_sync(iommu, &desc, 1, 0);
 	}
 out:
-	mutex_unlock(&pasid_mutex);
 	return ret;
 }