[v5,09/11] iommu/vt-d: Add iotlb flush for nested domain
Commit Message
This implements the .cache_invalidate_user() callback to support iotlb
flush for the nested domain. The callback walks the user-provided array of
invalidation requests and, for each valid entry, flushes the affected
stage-1 IOTLB entries on every IOMMU that the domain is attached to.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
---
drivers/iommu/intel/nested.c | 58 ++++++++++++++++++++++++++++++++++++
1 file changed, 58 insertions(+)
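For reviewers new to the series, a hedged sketch of the per-entry request
format this callback consumes. The authoritative definition of struct
iommu_hwpt_vtd_s1_invalidate lives in the uapi header added by an earlier
patch in this series; the field names below are inferred from how the new
code uses them, and the flush-all encoding (addr == 0, npages == -1) mirrors
the check in the callback rather than any spec text. The entries are passed
as an array through the iommufd hardware page table invalidation ioctl
introduced earlier in the series (not shown here).

#include <linux/types.h>

/*
 * Sketch of the per-entry invalidation request consumed by
 * intel_nested_cache_invalidate_user(). See the series' uapi patch
 * for the authoritative definition.
 */
struct iommu_hwpt_vtd_s1_invalidate {
	__aligned_u64 addr;	/* must be VTD_PAGE_SIZE aligned */
	__aligned_u64 npages;	/* number of pages; -1 selects a full flush */
	__u32 flags;		/* only IOMMU_VTD_QI_FLAGS_LEAF is accepted */
	__u32 __reserved;	/* must be zero */
};

/* Example: range flush of 16 pages, leaf (last-level) entries only */
struct iommu_hwpt_vtd_s1_invalidate inv_range = {
	.addr	= 0x100000,
	.npages	= 16,
	.flags	= IOMMU_VTD_QI_FLAGS_LEAF,
};

/* Example: flush the whole stage-1 address space of the nested domain */
struct iommu_hwpt_vtd_s1_invalidate inv_all = {
	.addr	= 0,
	.npages	= (__u64)-1,
};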
Comments
> From: Liu, Yi L <yi.l.liu@intel.com>
> Sent: Thursday, September 21, 2023 3:54 PM
> +
> +	/* REVISIT:
> +	 * VT-d has defined ITE, ICE, IQE for invalidation failure per hardware,
> +	 * but no error code yet, so just set the error code to be 0.
> +	 */
> +	*cerror_idx = 0;
> +
Is it "hardware doesn't provide error code now though it's defined in
spec" or "intel-iommu driver doesn't retrieve the error code though
it's provided by the hardware"?
Is there a guarantee that '0' isn't used for an existing error code or
won't be used for any new error code later?
On 2023/9/27 14:53, Tian, Kevin wrote:
>> From: Liu, Yi L <yi.l.liu@intel.com>
>> Sent: Thursday, September 21, 2023 3:54 PM
>> +
>> +	/* REVISIT:
>> +	 * VT-d has defined ITE, ICE, IQE for invalidation failure per hardware,
>> +	 * but no error code yet, so just set the error code to be 0.
>> +	 */
>> +	*cerror_idx = 0;
>> +
>
> Is it "hardware doesn't provide error code now though it's defined in
> spec" or "intel-iommu driver doesn't retrieve the error code though
> it's provided by the hardware"?
I didn't see the VT-d spec define an error code for cache invalidation. :(
>
> Is there a guarantee that '0' isn't used for an existing error code or
> won't be used for any new error code later?
We may need to check it.
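One observation that may help the error-code discussion above: even with
*cerror_idx hard-coded to 0, the callback does report which entry failed,
because it rewinds array->entry_num to the number of entries it fully
processed before bailing out. Below is a hedged sketch of how a caller
could use that, assuming the iommufd core copies the updated count back to
user space; the command struct, its fields and the ioctl name are
placeholders based on earlier patches in this series, not verbatim uapi.

/*
 * Hypothetical user-space handling of a partially failed invalidation.
 * "cmd" stands in for the iommufd invalidation command introduced
 * earlier in the series; field names are illustrative only.
 */
if (ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd) < 0) {
	/*
	 * Entries [0, cmd.entry_num) were handled; the entry at index
	 * cmd.entry_num is the one that failed copy-in or validation
	 * (see "array->entry_num = index" in the patch below).
	 */
	fprintf(stderr, "invalidation failed at entry %u: %s\n",
		cmd.entry_num, strerror(errno));
}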
@@ -68,9 +68,67 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
	kfree(to_dmar_domain(domain));
}
+static void domain_flush_iotlb_psi(struct dmar_domain *domain,
+				   u64 addr, unsigned long npages)
+{
+	struct iommu_domain_info *info;
+	unsigned long i;
+
+	xa_for_each(&domain->iommu_array, i, info)
+		iommu_flush_iotlb_psi(info->iommu, domain,
+				      addr >> VTD_PAGE_SHIFT, npages, 1, 0);
+}
+
+static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
+					      struct iommu_user_data_array *array,
+					      u32 *cerror_idx)
+{
+	const size_t min_len =
+		offsetofend(struct iommu_hwpt_vtd_s1_invalidate, __reserved);
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	struct iommu_hwpt_vtd_s1_invalidate inv_info;
+	u32 index;
+	int ret = 0;
+
+	/* REVISIT:
+	 * VT-d has defined ITE, ICE, IQE for invalidation failure per hardware,
+	 * but no error code yet, so just set the error code to be 0.
+	 */
+	*cerror_idx = 0;
+
+	if (array->entry_len < min_len)
+		return -EINVAL;
+
+	for (index = 0; index < array->entry_num; index++) {
+		ret = iommu_copy_user_data_from_array(&inv_info, array, index,
+						      sizeof(inv_info), min_len);
+		if (ret) {
+			pr_err_ratelimited("Failed to fetch invalidation request\n");
+			break;
+		}
+
+		if (inv_info.__reserved || (inv_info.flags & ~IOMMU_VTD_QI_FLAGS_LEAF) ||
+		    !IS_ALIGNED(inv_info.addr, VTD_PAGE_SIZE)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (inv_info.addr == 0 && inv_info.npages == -1)
+			intel_flush_iotlb_all(domain);
+		else
+			domain_flush_iotlb_psi(dmar_domain,
+					       inv_info.addr, inv_info.npages);
+	}
+
+	array->entry_num = index;
+
+	return ret;
+}
+
static const struct iommu_domain_ops intel_nested_domain_ops = {
	.attach_dev = intel_nested_attach_dev,
	.free = intel_nested_domain_free,
+	.cache_invalidate_user = intel_nested_cache_invalidate_user,
};
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *s2_domain,