@@ -128,7 +128,6 @@ struct iommu_page_response {
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
-typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
struct iommu_domain_geometry {
dma_addr_t aperture_start; /* First address that can be mapped */
@@ -671,8 +670,6 @@ struct iommu_fault_event {
/**
* struct iommu_fault_param - per-device IOMMU fault data
- * @handler: Callback function to handle IOMMU faults at device level
- * @data: handler private data
* @lock: protect pending faults list
* @dev: the device that owns this param
* @queue: IOPF queue
@@ -682,8 +679,6 @@ struct iommu_fault_event {
* @faults: holds the pending faults which need response
*/
struct iommu_fault_param {
- iommu_dev_fault_handler_t handler;
- void *data;
struct mutex lock;
struct device *dev;
@@ -806,11 +801,6 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
-extern int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data);
-
-extern int iommu_unregister_device_fault_handler(struct device *dev);
extern int iommu_report_device_fault(struct device *dev,
struct iommu_fault_event *evt);
@@ -1222,19 +1212,6 @@ static inline void iommu_group_put(struct iommu_group *group)
{
}
-static inline
-int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data)
-{
- return -ENODEV;
-}
-
-static inline int iommu_unregister_device_fault_handler(struct device *dev)
-{
- return 0;
-}
-
static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
@@ -13,7 +13,7 @@ struct iommu_fault;
struct iopf_queue;
#ifdef CONFIG_IOMMU_SVA
-int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);
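+/* The opaque cookie argument is gone; callers pass the faulting device directly. */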
+int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev);
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_remove_device(struct iopf_queue *queue,
@@ -26,7 +26,7 @@ enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);
#else /* CONFIG_IOMMU_SVA */
-static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
+static inline int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
{
return -ENODEV;
}
@@ -487,7 +487,6 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
- int ret;
struct device *dev = master->dev;
/*
@@ -500,16 +499,7 @@ static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return -EINVAL;
- ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
- if (ret)
- return ret;
-
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret) {
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
- return ret;
- }
- return 0;
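+ /*
+  * Faults are now routed to iommu_queue_iopf() by the IOMMU core, so
+  * only the IOPF queue registration is needed here.
+  */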
+ return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
}
static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
@@ -519,7 +509,6 @@ static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return;
- iommu_unregister_device_fault_handler(dev);
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}
@@ -4427,23 +4427,15 @@ static int intel_iommu_enable_iopf(struct device *dev)
if (ret)
return ret;
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret)
- goto iopf_remove_device;
-
ret = pci_enable_pri(pdev, PRQ_DEPTH);
- if (ret)
- goto iopf_unregister_handler;
+ if (ret) {
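+ /* PRI could not be enabled: roll back the IOPF queue registration. */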
+ iopf_queue_remove_device(iommu->iopf_queue, dev);
+ return ret;
+ }
+
info->pri_enabled = 1;
return 0;
-
-iopf_unregister_handler:
- iommu_unregister_device_fault_handler(dev);
-iopf_remove_device:
- iopf_queue_remove_device(iommu->iopf_queue, dev);
-
- return ret;
}
static int intel_iommu_disable_iopf(struct device *dev)
@@ -4466,11 +4458,9 @@ static int intel_iommu_disable_iopf(struct device *dev)
info->pri_enabled = 0;
/*
- * With PRI disabled and outstanding PRQs drained, unregistering
- * fault handler and removing device from iopf queue should never
- * fail.
+ * With PRI disabled and outstanding PRQs drained, removing device
+ * from iopf queue should never fail.
*/
- WARN_ON(iommu_unregister_device_fault_handler(dev));
WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev));
return 0;
@@ -87,7 +87,7 @@ static void iopf_handler(struct work_struct *work)
/**
* iommu_queue_iopf - IO Page Fault handler
* @fault: fault event
- * @cookie: struct device, passed to iommu_register_device_fault_handler.
+ * @dev: the faulting device.
*
* Add a fault to the device workqueue, to be handled by mm.
*
@@ -124,14 +124,12 @@ static void iopf_handler(struct work_struct *work)
*
* Return: 0 on success and <0 on error.
*/
-int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
+int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
{
int ret;
struct iopf_group *group;
struct iopf_fault *iopf, *next;
struct iommu_fault_param *iopf_param;
-
- struct device *dev = cookie;
struct dev_iommu *param = dev->iommu;
lockdep_assert_held(&param->lock);
@@ -1330,76 +1330,6 @@ void iommu_group_put(struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_group_put);
-/**
- * iommu_register_device_fault_handler() - Register a device fault handler
- * @dev: the device
- * @handler: the fault handler
- * @data: private data passed as argument to the handler
- *
- * When an IOMMU fault event is received, this handler gets called with the
- * fault event and data as argument. The handler should return 0 on success. If
- * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
- * complete the fault by calling iommu_page_response() with one of the following
- * response code:
- * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
- * - IOMMU_PAGE_RESP_INVALID: terminate the fault
- * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
- * page faults if possible.
- *
- * Return 0 if the fault handler was installed successfully, or an error.
- */
-int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data)
-{
- struct dev_iommu *param = dev->iommu;
- int ret = 0;
-
- if (!param || !param->fault_param)
- return -EINVAL;
-
- mutex_lock(&param->lock);
- /* Only allow one fault handler registered for each device */
- if (param->fault_param->handler) {
- ret = -EBUSY;
- goto done_unlock;
- }
-
- param->fault_param->handler = handler;
- param->fault_param->data = data;
-
-done_unlock:
- mutex_unlock(&param->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
-
-/**
- * iommu_unregister_device_fault_handler() - Unregister the device fault handler
- * @dev: the device
- *
- * Remove the device fault handler installed with
- * iommu_register_device_fault_handler().
- *
- * Return 0 on success, or an error.
- */
-int iommu_unregister_device_fault_handler(struct device *dev)
-{
- struct dev_iommu *param = dev->iommu;
-
- if (!param || !param->fault_param)
- return -EINVAL;
-
- mutex_lock(&param->lock);
- param->fault_param->handler = NULL;
- param->fault_param->data = NULL;
- mutex_unlock(&param->lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
-
/**
* iommu_report_device_fault() - Report fault event to device driver
* @dev: the device
@@ -1424,10 +1354,6 @@ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
/* we only report device fault if there is a handler registered */
mutex_lock(&param->lock);
fparam = param->fault_param;
- if (!fparam || !fparam->handler) {
- ret = -EINVAL;
- goto done_unlock;
- }
if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
(evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
@@ -1442,7 +1368,7 @@ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
mutex_unlock(&fparam->lock);
}
- ret = fparam->handler(&evt->fault, fparam->data);
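+ /* No per-device handler anymore: queue the fault for IOPF handling. */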
+ ret = iommu_queue_iopf(&evt->fault, dev);
if (ret && evt_pending) {
mutex_lock(&fparam->lock);
list_del(&evt_pending->list);