Nested translation uses stage-1 and stage-2 page tables. A stage-1 page
table is managed by user space, and it works on top of a stage-2 page
table, which is the parent hwpt of the stage-1 hwpt.

The iommu core's domain_alloc_user op already accepts a parent
iommu_domain and user_data when allocating an iommu_domain. Make
iommufd_hw_pagetable_alloc() accept a parent hwpt and user_data and
relay them to the iommu core, to prepare for hw_pagetable allocation
with user_data.

Also add a parent pointer to struct iommufd_hw_pagetable so that the
parent's refcount can be taken at allocation time and released at
destroy time.
Co-developed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
---
drivers/iommu/iommufd/device.c | 3 +-
drivers/iommu/iommufd/hw_pagetable.c | 40 +++++++++++++++++++++----
drivers/iommu/iommufd/iommufd_private.h | 8 ++++-
3 files changed, 44 insertions(+), 7 deletions(-)
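
As a rough usage illustration (not part of the patch): example_alloc_nested()
below is a hypothetical helper that would sit next to the existing callers in
drivers/iommu/iommufd/. At this point in the series only
IOMMU_HWPT_TYPE_DEFAULT exists, so a real nested hwpt_type value and the
user_data layout would come from later driver patches.

#include "iommufd_private.h"

/*
 * Illustrative sketch only: shows how a caller would use the extended
 * iommufd_hw_pagetable_alloc() to put a stage-1 hwpt on top of a parent.
 */
static struct iommufd_hw_pagetable *
example_alloc_nested(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
		     struct iommufd_device *idev,
		     struct iommufd_hw_pagetable *parent,
		     enum iommu_hwpt_type hwpt_type,
		     union iommu_domain_user_data *user_data)
{
	lockdep_assert_held(&ioas->mutex);

	/* user_data is mandatory whenever a parent is supplied */
	if (!user_data)
		return ERR_PTR(-EINVAL);

	return iommufd_hw_pagetable_alloc(ictx, ioas, idev, hwpt_type,
					  parent, user_data,
					  false /* immediate_attach */);
}
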
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -542,7 +542,8 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
}
hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev,
- immediate_attach);
+ IOMMU_HWPT_TYPE_DEFAULT,
+ NULL, NULL, immediate_attach);
if (IS_ERR(hwpt)) {
destroy_hwpt = ERR_CAST(hwpt);
goto out_unlock;
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -24,6 +24,8 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
if (hwpt->domain)
iommu_domain_free(hwpt->domain);
+ if (hwpt->parent)
+ refcount_dec(&hwpt->parent->obj.users);
refcount_dec(&hwpt->ioas->obj.users);
}
@@ -61,6 +63,9 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt)
* @ictx: iommufd context
* @ioas: IOAS to associate the domain with
* @idev: Device to get an iommu_domain for
+ * @hwpt_type: Requested type of hw_pagetable
+ * @parent: Optional parent HWPT to associate with
+ * @user_data: Optional user data forwarded to the iommu driver
* @immediate_attach: True if idev should be attached to the hwpt
*
* Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT
@@ -73,14 +78,24 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt)
*/
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
- struct iommufd_device *idev, bool immediate_attach)
+ struct iommufd_device *idev,
+ enum iommu_hwpt_type hwpt_type,
+ struct iommufd_hw_pagetable *parent,
+ union iommu_domain_user_data *user_data,
+ bool immediate_attach)
{
const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
+ struct iommu_domain *parent_domain = NULL;
struct iommufd_hw_pagetable *hwpt;
int rc;
lockdep_assert_held(&ioas->mutex);
+ if (parent && !user_data)
+ return ERR_PTR(-EINVAL);
+ if (user_data && !ops->domain_alloc_user)
+ return ERR_PTR(-EOPNOTSUPP);
+
hwpt = iommufd_object_alloc(ictx, hwpt, IOMMUFD_OBJ_HW_PAGETABLE);
if (IS_ERR(hwpt))
return hwpt;
@@ -89,11 +104,15 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
/* Pairs with iommufd_hw_pagetable_destroy() */
refcount_inc(&ioas->obj.users);
hwpt->ioas = ioas;
+ if (parent) {
+ hwpt->parent = parent;
+ parent_domain = parent->domain;
+ refcount_inc(&parent->obj.users);
+ }
if (ops->domain_alloc_user) {
- hwpt->domain = ops->domain_alloc_user(idev->dev,
- IOMMU_HWPT_TYPE_DEFAULT,
- NULL, NULL);
+ hwpt->domain = ops->domain_alloc_user(idev->dev, hwpt_type,
+ parent_domain, user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
@@ -107,6 +126,15 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
}
}
+ /* It must be either NESTED or UNMANAGED, depending on parent_domain */
+ if (WARN_ON_ONCE((parent_domain &&
+ hwpt->domain->type != IOMMU_DOMAIN_NESTED) ||
+ (!parent_domain &&
+ hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED))) {
+ rc = -EINVAL;
+ goto out_abort;
+ }
+
/*
* Set the coherency mode before we do iopt_table_add_domain() as some
* iommus have a per-PTE bit that controls it and need to decide before
@@ -168,7 +196,9 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
}
mutex_lock(&ioas->mutex);
- hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev, false);
+ hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev,
+ IOMMU_HWPT_TYPE_DEFAULT,
+ NULL, NULL, false);
if (IS_ERR(hwpt)) {
rc = PTR_ERR(hwpt);
goto out_unlock;
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -8,6 +8,7 @@
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>
+#include <linux/iommu.h>
struct iommu_domain;
struct iommu_group;
@@ -243,6 +244,7 @@ int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd);
*/
struct iommufd_hw_pagetable {
struct iommufd_object obj;
+ struct iommufd_hw_pagetable *parent;
struct iommufd_ioas *ioas;
struct iommu_domain *domain;
bool auto_domain : 1;
@@ -254,7 +256,11 @@ struct iommufd_hw_pagetable {
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
- struct iommufd_device *idev, bool immediate_attach);
+ struct iommufd_device *idev,
+ enum iommu_hwpt_type hwpt_type,
+ struct iommufd_hw_pagetable *parent,
+ union iommu_domain_user_data *user_data,
+ bool immediate_attach);
int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt);
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev);
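
For reference, a hypothetical driver-side sketch of what the extended
domain_alloc_user op is expected to return. Names such as exampledrv_* are
invented and the op prototype is inferred from how
iommufd_hw_pagetable_alloc() calls it above; the NESTED/UNMANAGED contract
matches the WARN_ON_ONCE() check added in hw_pagetable.c.

#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Hypothetical per-domain state of an imaginary driver */
struct exampledrv_domain {
	struct iommu_domain domain;
	/* stage-1/stage-2 page table state would live here */
};

static struct iommu_domain *
exampledrv_domain_alloc_user(struct device *dev, enum iommu_hwpt_type hwpt_type,
			     struct iommu_domain *parent,
			     union iommu_domain_user_data *user_data)
{
	struct exampledrv_domain *edomain;

	if (parent) {
		/* A nested allocation needs driver-specific user_data */
		if (hwpt_type == IOMMU_HWPT_TYPE_DEFAULT || !user_data)
			return ERR_PTR(-EINVAL);
		if (parent->type != IOMMU_DOMAIN_UNMANAGED)
			return ERR_PTR(-EINVAL);
	} else if (hwpt_type != IOMMU_HWPT_TYPE_DEFAULT) {
		return ERR_PTR(-EOPNOTSUPP);
	}

	edomain = kzalloc(sizeof(*edomain), GFP_KERNEL);
	if (!edomain)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is the contract iommufd_hw_pagetable_alloc() enforces:
	 * a parent produces an IOMMU_DOMAIN_NESTED domain, no parent
	 * produces IOMMU_DOMAIN_UNMANAGED.
	 */
	edomain->domain.type = parent ? IOMMU_DOMAIN_NESTED :
					IOMMU_DOMAIN_UNMANAGED;
	/* ... set edomain->domain.ops and program hardware state here ... */
	return &edomain->domain;
}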