In nested translation, the stage-1 page table is user-managed but cached
by the IOMMU hardware, so an update to a present stage-1 page table entry
must be followed by a cache invalidation.

Add an IOMMU_HWPT_INVALIDATE ioctl to support such cache invalidations.
It takes hwpt_id to specify the iommu_domain, and a multi-entry array so
that multiple invalidation requests can be issued in a single ioctl.
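
For illustration, a rough userspace sketch of the expected flow. The
request layout "struct hwpt_inv_req" and the helper name below are made
up for this example; the actual per-driver request format is defined by
the individual IOMMU drivers, not by this patch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Made-up request layout; a real one comes from the IOMMU driver. */
struct hwpt_inv_req {
	__aligned_u64 addr;	/* base of the range to invalidate */
	__aligned_u64 npages;	/* driver-defined range encoding */
};

static int hwpt_invalidate(int iommufd, __u32 hwpt_id,
			   struct hwpt_inv_req *reqs, __u32 nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.reqs_uptr = (uintptr_t)reqs,
		.req_len = sizeof(*reqs),
		.req_num = nreqs,
	};
	int rc = ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd);

	/*
	 * On return, cmd.req_num holds the number of requests the kernel
	 * handled; on failure, cmd.out_driver_error_code may carry a
	 * driver-specific error code.
	 */
	return rc;
}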
Co-developed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
---
drivers/iommu/iommufd/hw_pagetable.c | 33 +++++++++++++++++++++++++
drivers/iommu/iommufd/iommufd_private.h | 9 +++++++
drivers/iommu/iommufd/main.c | 3 +++
include/uapi/linux/iommufd.h | 29 ++++++++++++++++++++++
4 files changed, 74 insertions(+)
@@ -382,3 +382,36 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
iommufd_put_object(&idev->obj);
return rc;
}
+
+int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
+{
+ struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
+ struct iommu_user_data_array data_array = {
+ .uptr = u64_to_user_ptr(cmd->reqs_uptr),
+ .entry_len = cmd->req_len,
+ .entry_num = cmd->req_num,
+ };
+ struct iommufd_hw_pagetable *hwpt;
+ int rc = 0;
+
+ if (!cmd->req_len || !cmd->req_num)
+ return -EOPNOTSUPP;
+
+ hwpt = iommufd_get_hwpt(ucmd, cmd->hwpt_id);
+ if (IS_ERR(hwpt))
+ return PTR_ERR(hwpt);
+
+ if (!hwpt->user_managed) {
+ rc = -EINVAL;
+ goto out_put_hwpt;
+ }
+
+ rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain, &data_array,
+ &cmd->out_driver_error_code);
+ cmd->req_num = data_array.entry_num;
+ if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
+ rc = -EFAULT;
+out_put_hwpt:
+ iommufd_put_object(&hwpt->obj);
+ return rc;
+}
@@ -270,6 +270,7 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev);
void iommufd_hw_pagetable_destroy(struct iommufd_object *obj);
void iommufd_hw_pagetable_abort(struct iommufd_object *obj);
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
+int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd);
static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
struct iommufd_hw_pagetable *hwpt)
@@ -281,6 +282,14 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
refcount_dec(&hwpt->obj.users);
}
+static inline struct iommufd_hw_pagetable *
+iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id)
+{
+ return container_of(iommufd_get_object(ucmd->ictx, id,
+ IOMMUFD_OBJ_HW_PAGETABLE),
+ struct iommufd_hw_pagetable, obj);
+}
+
static inline bool
iommufd_hw_pagetable_compare_ioas(struct iommufd_hw_pagetable *old_hwpt,
struct iommufd_hw_pagetable *new_hwpt)
@@ -307,6 +307,7 @@ union ucmd_buffer {
struct iommu_destroy destroy;
struct iommu_hw_info info;
struct iommu_hwpt_alloc hwpt;
+ struct iommu_hwpt_invalidate cache;
struct iommu_ioas_alloc alloc;
struct iommu_ioas_allow_iovas allow_iovas;
struct iommu_ioas_copy ioas_copy;
@@ -342,6 +343,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
__reserved),
IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
__reserved),
+ IOCTL_OP(IOMMU_HWPT_INVALIDATE, iommufd_hwpt_invalidate,
+ struct iommu_hwpt_invalidate, out_driver_error_code),
IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl,
struct iommu_ioas_alloc, out_ioas_id),
IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas,
@@ -47,6 +47,7 @@ enum {
IOMMUFD_CMD_VFIO_IOAS,
IOMMUFD_CMD_HWPT_ALLOC,
IOMMUFD_CMD_GET_HW_INFO,
+ IOMMUFD_CMD_HWPT_INVALIDATE,
};
/**
@@ -478,4 +479,32 @@ struct iommu_hw_info {
__u32 __reserved;
};
#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
+
+/**
+ * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
+ * @size: sizeof(struct iommu_hwpt_invalidate)
+ * @hwpt_id: HWPT ID of target hardware page table for cache invalidation
+ * @reqs_uptr: User pointer to an array of @req_num cache invalidation
+ * requests. Each entry in the array has a fixed width of @req_len
+ * bytes and holds a driver-specific invalidation request structure
+ * for the given hardware page table.
+ * @req_len: Length (in bytes) of one request entry in the array
+ * @req_num: Input the number of cache invalidation requests in the array.
+ * Output the number of requests successfully handled by the kernel.
+ * @out_driver_error_code: Reports a driver-specific error code upon failure
+ *
+ * Invalidate the IOMMU cache for a user-managed page table. Modifications to
+ * a user-managed page table should be followed by this operation to sync the
+ * cache. Each ioctl can carry one or more cache invalidation requests in an
+ * array whose total size is @req_len * @req_num bytes.
+ */
+struct iommu_hwpt_invalidate {
+ __u32 size;
+ __u32 hwpt_id;
+ __aligned_u64 reqs_uptr;
+ __u32 req_len;
+ __u32 req_num;
+ __u32 out_driver_error_code;
+};
+#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
#endif
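
Not part of the patch: a rough sketch of the contract the new ioctl path
expects from a driver's cache_invalidate_user op. "struct my_inv_req",
my_flush_stage1() and MY_INV_HW_ERROR are hypothetical placeholders that
a real driver would replace with its own request layout, flush primitive
and error code:

/* Hypothetical driver-side example, for illustration only. */
struct my_inv_req {
	__aligned_u64 addr;
	__aligned_u64 npages;
};

static int my_cache_invalidate_user(struct iommu_domain *domain,
				    struct iommu_user_data_array *array,
				    u32 *error_code)
{
	struct my_inv_req req = {};
	u32 index;
	int rc = 0;

	for (index = 0; index < array->entry_num; index++) {
		/* Entries sit at a fixed stride of array->entry_len bytes */
		void __user *ureq = array->uptr +
				    (size_t)index * array->entry_len;

		/* A real driver would also validate array->entry_len */
		if (copy_from_user(&req, ureq,
				   min_t(size_t, sizeof(req),
					 array->entry_len))) {
			rc = -EFAULT;
			break;
		}

		rc = my_flush_stage1(domain, req.addr, req.npages);
		if (rc) {
			/* Driver-specific detail for userspace debugging */
			*error_code = MY_INV_HW_ERROR;
			break;
		}
	}

	/* iommufd reports this back to userspace via req_num */
	array->entry_num = index;
	return rc;
}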