author		Jean-Philippe Brucker <jean-philippe.brucker@arm.com>	2019-06-03 15:57:49 +0100
committer	Joerg Roedel <jroedel@suse.de>	2019-06-12 10:19:06 +0200
commit		bf3255b3cfe2d06280340dbac3f44b65d3ee6da3 (patch)
tree		8cc9e258d1fa55083b192700bd103a52d919a9b7 /drivers/iommu/iommu.c
parent		0c830e6b32826311fc2b9ea1f4679be0f4ef0933 (diff)
iommu: Add recoverable fault reporting
Some IOMMU hardware features, for example PCI PRI and Arm SMMU Stall, enable recoverable I/O page faults. Allow IOMMU drivers to report PRI Page Requests and Stall events through the new fault reporting API. The consumer of the fault can be either an I/O page fault handler in the host, or a guest OS.

Once handled, the fault must be completed by sending a page response back to the IOMMU. Add an iommu_page_response() function to complete a page fault.

There are two ways to extend the userspace API:

* Add a field to iommu_page_response and a flag to iommu_page_response::flags describing the validity of this field.

* Introduce a new iommu_page_response_X structure with a different version number. The kernel must then support both versions.

Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--	drivers/iommu/iommu.c	94
1 file changed, 92 insertions(+), 2 deletions(-)
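
For orientation before the diff itself, here is a sketch of how a host-side consumer of the new API might look. None of this is part of the patch: my_iopf_work, my_iopf_handler and my_iopf_fn (sketched after iommu_page_response() below) are hypothetical names, and a real consumer would also stash the non-last requests of a page request group rather than ignore them.

#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical bookkeeping for one deferred page request */
struct my_iopf_work {
        struct work_struct work;
        struct device *dev;
        struct iommu_fault_page_request prm;
};

static void my_iopf_fn(struct work_struct *work);      /* sketched below */

static int my_iopf_handler(struct iommu_fault *fault, void *data)
{
        struct device *dev = data;
        struct my_iopf_work *iopf;

        if (fault->type != IOMMU_FAULT_PAGE_REQ)
                return -EOPNOTSUPP;     /* unrecoverable faults not handled here */

        if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
                return 0;       /* only the last request of a group gets a response */

        iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
        if (!iopf)
                return -ENOMEM; /* the core rolls back the pending event */

        iopf->dev = dev;
        iopf->prm = fault->prm; /* copy: the event isn't ours to keep */
        INIT_WORK(&iopf->work, my_iopf_fn);
        schedule_work(&iopf->work);     /* my_iopf_fn() completes the fault */

        return 0;
}

/* At probe time: */
ret = iommu_register_device_fault_handler(dev, my_iopf_handler, dev);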
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 293a6fa716e0..ac1f29c19e59 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -891,7 +891,14 @@ EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
* @data: private data passed as argument to the handler
*
* When an IOMMU fault event is received, this handler gets called with the
- * fault event and data as argument. The handler should return 0 on success.
+ * fault event and data as argument. The handler should return 0 on success. If
+ * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
+ * complete the fault by calling iommu_page_response() with one of the following
+ * response codes:
+ * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
+ * - IOMMU_PAGE_RESP_INVALID: terminate the fault
+ * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
+ * page faults if possible.
*
* Return 0 if the fault handler was installed successfully, or an error.
*/
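
To make the three codes concrete, the page-in step of such a handler might map its outcome like this. This is a fragment of the hypothetical handler sketched above; my_resolve_fault() stands in for the consumer's own resolution logic, e.g. handle_mm_fault() on a bound address space:

        enum iommu_page_response_code code;

        switch (my_resolve_fault(&fault->prm)) {       /* hypothetical */
        case 0:
                code = IOMMU_PAGE_RESP_SUCCESS; /* mapped, device may retry */
                break;
        case -EFAULT:
                code = IOMMU_PAGE_RESP_INVALID; /* bad access, drop this one */
                break;
        default:
                code = IOMMU_PAGE_RESP_FAILURE; /* broken, stop PRI if possible */
                break;
        }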
@@ -921,6 +928,8 @@ int iommu_register_device_fault_handler(struct device *dev,
}
param->fault_param->handler = handler;
param->fault_param->data = data;
+ mutex_init(&param->fault_param->lock);
+ INIT_LIST_HEAD(&param->fault_param->faults);
done_unlock:
mutex_unlock(&param->lock);
@@ -951,6 +960,12 @@ int iommu_unregister_device_fault_handler(struct device *dev)
if (!param->fault_param)
goto unlock;
+ /* we cannot unregister the handler if there are pending faults */
+ if (!list_empty(&param->fault_param->faults)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
kfree(param->fault_param);
param->fault_param = NULL;
put_device(dev);
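
Because of this check, a consumer tearing down must complete the faults it still owes before the handler can go away. A hedged sketch, where my_flush_pending_faults() is a hypothetical helper that sends a page response for every outstanding request:

        ret = iommu_unregister_device_fault_handler(dev);
        if (ret == -EBUSY) {
                my_flush_pending_faults(dev);   /* calls iommu_page_response() */
                ret = iommu_unregister_device_fault_handler(dev);
        }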
@@ -967,13 +982,15 @@ EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
* @evt: fault event data
*
* Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
- * handler.
+ * handler. When this function fails and the fault is recoverable, it is the
+ * caller's responsibility to complete the fault.
*
* Return 0 on success, or an error.
*/
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
struct iommu_param *param = dev->iommu_param;
+ struct iommu_fault_event *evt_pending = NULL;
struct iommu_fault_param *fparam;
int ret = 0;
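
On the producing side, an IOMMU driver's threaded IRQ handler might use the function as sketched below. my_hw_prq and my_hw_terminate_prq() are invented stand-ins for the driver's hardware queue handling; per the updated comment, the driver completes the fault itself when reporting fails:

#include <linux/interrupt.h>

/* Hypothetical descriptor popped off the hardware PRI/stall queue */
struct my_hw_prq {
        struct device *dev;
        u32 grpid;
        u64 addr;
};

static irqreturn_t my_prq_thread(int irq, void *cookie)
{
        struct my_hw_prq *prq = cookie;
        struct iommu_fault_event evt = {
                .fault = {
                        .type = IOMMU_FAULT_PAGE_REQ,
                        .prm = {
                                .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
                                .grpid = prq->grpid,
                                .perm  = IOMMU_FAULT_PERM_READ,
                                .addr  = prq->addr,
                        },
                },
        };

        if (iommu_report_device_fault(prq->dev, &evt))
                /* No handler, or allocation failed: fail the fault in HW */
                my_hw_terminate_prq(prq, IOMMU_PAGE_RESP_FAILURE);

        return IRQ_HANDLED;
}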
@@ -987,13 +1004,86 @@ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
ret = -EINVAL;
goto done_unlock;
}
+
+ if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
+ (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+ evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
+ GFP_KERNEL);
+ if (!evt_pending) {
+ ret = -ENOMEM;
+ goto done_unlock;
+ }
+ mutex_lock(&fparam->lock);
+ list_add_tail(&evt_pending->list, &fparam->faults);
+ mutex_unlock(&fparam->lock);
+ }
+
ret = fparam->handler(&evt->fault, fparam->data);
+ if (ret && evt_pending) {
+ mutex_lock(&fparam->lock);
+ list_del(&evt_pending->list);
+ mutex_unlock(&fparam->lock);
+ kfree(evt_pending);
+ }
done_unlock:
mutex_unlock(&param->lock);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);
+int iommu_page_response(struct device *dev,
+ struct iommu_page_response *msg)
+{
+ bool pasid_valid;
+ int ret = -EINVAL;
+ struct iommu_fault_event *evt;
+ struct iommu_fault_page_request *prm;
+ struct iommu_param *param = dev->iommu_param;
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+ if (!domain || !domain->ops->page_response)
+ return -ENODEV;
+
+ if (!param || !param->fault_param)
+ return -EINVAL;
+
+ if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
+ msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
+ return -EINVAL;
+
+ /* Only send response if there is a fault report pending */
+ mutex_lock(&param->fault_param->lock);
+ if (list_empty(&param->fault_param->faults)) {
+ dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
+ goto done_unlock;
+ }
+ /*
+ * Check if we have a matching page request pending to respond to,
+ * otherwise return -EINVAL
+ */
+ list_for_each_entry(evt, &param->fault_param->faults, list) {
+ prm = &evt->fault.prm;
+ pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+
+ if ((pasid_valid && prm->pasid != msg->pasid) ||
+ prm->grpid != msg->grpid)
+ continue;
+
+ /* Sanitize the reply */
+ msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
+
+ ret = domain->ops->page_response(dev, evt, msg);
+ list_del(&evt->list);
+ kfree(evt);
+ break;
+ }
+
+done_unlock:
+ mutex_unlock(&param->fault_param->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_page_response);
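
Closing the loop on the consumer sketch above: the deferred work item completes the fault by echoing the request's group ID (and PASID, when valid) through iommu_page_response(). Hypothetical names again; note that the core re-sanitizes msg->flags against the recorded request before forwarding it to the driver:

static void my_iopf_fn(struct work_struct *work)
{
        struct my_iopf_work *iopf = container_of(work, struct my_iopf_work, work);
        struct iommu_page_response resp = {
                .version = IOMMU_PAGE_RESP_VERSION_1,
                .grpid   = iopf->prm.grpid,
                .code    = IOMMU_PAGE_RESP_SUCCESS,
        };

        if (my_resolve_fault(&iopf->prm))       /* hypothetical page-in */
                resp.code = IOMMU_PAGE_RESP_INVALID;

        if (iopf->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
                resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
                resp.pasid = iopf->prm.pasid;
        }

        if (iommu_page_response(iopf->dev, &resp))
                dev_warn(iopf->dev, "page response failed\n");
        kfree(iopf);
}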
+
/**
* iommu_group_id - Return ID for a group
* @group: the group to ID