[v7,3/3] iommu/vt-d: Add iotlb flush for nested domain
Commit Message
This implements the .cache_invalidate_user() callback to support IOTLB
flushes for nested domains.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
---
drivers/iommu/intel/nested.c | 54 ++++++++++++++++++++++++++++++++++++
1 file changed, 54 insertions(+)
Comments
> From: Liu, Yi L <yi.l.liu@intel.com>
> Sent: Friday, November 17, 2023 9:18 PM
> +
> +		if (inv_info.__reserved || (inv_info.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
> +		    !IS_ALIGNED(inv_info.addr, VTD_PAGE_SIZE)) {
> +			ret = -EINVAL;
> +			break;
> +		}
-EOPNOTSUPP for the first two checks.
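A minimal sketch of the split being suggested here, with the reserved-field
and unknown-flag checks returning -EOPNOTSUPP (the request asks for something
this kernel does not support) while a misaligned address keeps -EINVAL (the
request is malformed). Illustrative only, not the code that was merged:

		if (inv_info.__reserved || (inv_info.flags & ~IOMMU_VTD_INV_FLAGS_LEAF)) {
			ret = -EOPNOTSUPP;	/* unsupported, not invalid */
			break;
		}

		if (!IS_ALIGNED(inv_info.addr, VTD_PAGE_SIZE)) {
			ret = -EINVAL;		/* malformed request */
			break;
		}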
On Fri, Nov 17, 2023 at 05:18:16AM -0800, Yi Liu wrote:
> +static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
> +					      struct iommu_user_data_array *array,
> +					      u32 *cerror_idx)
> +{
> +	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
> +	struct iommu_hwpt_vtd_s1_invalidate inv_info;
> +	u32 index;
> +	int ret;
> +
> +	/* REVISIT:
> +	 * VT-d has defined ITE, ICE, IQE for invalidation failure per hardware,
> +	 * but no error code yet, so just set the error code to be 0.
> +	 */
> +	*cerror_idx = 0;
> +
> +	for (index = 0; index < array->entry_num; index++) {
> +		ret = iommu_copy_struct_from_user_array(&inv_info, array,
> +							IOMMU_HWPT_DATA_VTD_S1,
> +							index, __reserved);
> +		if (ret) {
> +			pr_err_ratelimited("Failed to fetch invalidation request\n");
> +			break;
No error prints on ioctls!
> +		if (inv_info.addr == 0 && inv_info.npages == -1)
> +			intel_flush_iotlb_all(domain);
-1 is clearer written as U64_MAX - same remark for the comment
documenting it.
Jason
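Folding both remarks into the loop body would look roughly like this (a
sketch of the suggested shape, assuming the copy call itself is unchanged;
not the final code):

		ret = iommu_copy_struct_from_user_array(&inv_info, array,
							IOMMU_HWPT_DATA_VTD_S1,
							index, __reserved);
		if (ret)
			break;	/* fail silently: no error prints on ioctl paths */

		/* addr == 0 with npages == U64_MAX means "invalidate everything" */
		if (inv_info.addr == 0 && inv_info.npages == U64_MAX)
			intel_flush_iotlb_all(domain);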
On 2023/11/20 16:32, Tian, Kevin wrote:
>> From: Liu, Yi L <yi.l.liu@intel.com>
>> Sent: Friday, November 17, 2023 9:18 PM
>> +
>> +		if (inv_info.__reserved || (inv_info.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
>> +		    !IS_ALIGNED(inv_info.addr, VTD_PAGE_SIZE)) {
>> +			ret = -EINVAL;
>> +			break;
>> +		}
>
> -EOPNOTSUPP for the first two checks.
>
yes.
On 2023/12/7 02:56, Jason Gunthorpe wrote:
> On Fri, Nov 17, 2023 at 05:18:16AM -0800, Yi Liu wrote:
>> +static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
>> +					       struct iommu_user_data_array *array,
>> +					       u32 *cerror_idx)
>> +{
>> +	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
>> +	struct iommu_hwpt_vtd_s1_invalidate inv_info;
>> +	u32 index;
>> +	int ret;
>> +
>> +	/* REVISIT:
>> +	 * VT-d has defined ITE, ICE, IQE for invalidation failure per hardware,
>> +	 * but no error code yet, so just set the error code to be 0.
>> +	 */
>> +	*cerror_idx = 0;
>> +
>> +	for (index = 0; index < array->entry_num; index++) {
>> +		ret = iommu_copy_struct_from_user_array(&inv_info, array,
>> +							IOMMU_HWPT_DATA_VTD_S1,
>> +							index, __reserved);
>> +		if (ret) {
>> +			pr_err_ratelimited("Failed to fetch invalidation request\n");
>> +			break;
>
> No error prints on ioctls!
ok, will remove it.
>
>> +		if (inv_info.addr == 0 && inv_info.npages == -1)
>> +			intel_flush_iotlb_all(domain);
>
> -1 is clearer written as U64_MAX - same remark for the comment
> documenting it.
sure.
@@ -73,9 +73,63 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
 	kfree(to_dmar_domain(domain));
 }
 
+static void domain_flush_iotlb_psi(struct dmar_domain *domain,
+				   u64 addr, unsigned long npages)
+{
+	struct iommu_domain_info *info;
+	unsigned long i;
+
+	xa_for_each(&domain->iommu_array, i, info)
+		iommu_flush_iotlb_psi(info->iommu, domain,
+				      addr >> VTD_PAGE_SHIFT, npages, 1, 0);
+}
+
+static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
+					      struct iommu_user_data_array *array,
+					      u32 *cerror_idx)
+{
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	struct iommu_hwpt_vtd_s1_invalidate inv_info;
+	u32 index;
+	int ret;
+
+	/* REVISIT:
+	 * VT-d has defined ITE, ICE, IQE for invalidation failure per hardware,
+	 * but no error code yet, so just set the error code to be 0.
+	 */
+	*cerror_idx = 0;
+
+	for (index = 0; index < array->entry_num; index++) {
+		ret = iommu_copy_struct_from_user_array(&inv_info, array,
+							IOMMU_HWPT_DATA_VTD_S1,
+							index, __reserved);
+		if (ret) {
+			pr_err_ratelimited("Failed to fetch invalidation request\n");
+			break;
+		}
+
+		if (inv_info.__reserved || (inv_info.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
+		    !IS_ALIGNED(inv_info.addr, VTD_PAGE_SIZE)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (inv_info.addr == 0 && inv_info.npages == -1)
+			intel_flush_iotlb_all(domain);
+		else
+			domain_flush_iotlb_psi(dmar_domain,
+					       inv_info.addr, inv_info.npages);
+	}
+
+	array->entry_num = index;
+
+	return ret;
+}
+
 static const struct iommu_domain_ops intel_nested_domain_ops = {
 	.attach_dev		= intel_nested_attach_dev,
 	.free			= intel_nested_domain_free,
+	.cache_invalidate_user	= intel_nested_cache_invalidate_user,
 };
 
 struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
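For context, a rough sketch of how userspace (e.g. a VMM) might drive this
callback through iommufd's IOMMU_HWPT_INVALIDATE ioctl. The struct layout and
field names below follow the uAPI as proposed around this series and may not
match what was finally merged; nested_hwpt_id is a hypothetical ID assumed to
come from an earlier IOMMU_HWPT_ALLOC:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/iommufd.h>

	/* Flush the whole stage-1 IOTLB of a nested hwpt (sketch only). */
	static int flush_all_s1(int iommufd, uint32_t nested_hwpt_id)
	{
		struct iommu_hwpt_vtd_s1_invalidate inv = {
			.addr	= 0,
			.npages	= UINT64_MAX,	/* addr 0 + max npages: flush everything */
			.flags	= 0,
		};
		struct iommu_hwpt_invalidate cmd = {
			.size		= sizeof(cmd),
			.hwpt_id	= nested_hwpt_id,
			.data_uptr	= (uintptr_t)&inv,
			.data_type	= IOMMU_HWPT_DATA_VTD_S1,
			.entry_len	= sizeof(inv),
			.entry_num	= 1,
		};

		/* on return, cmd.entry_num holds how many entries were handled */
		return ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd);
	}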