[2/7] iommu/vt-d: Add blocking domain support

Message ID 20221103055329.633052-3-baolu.lu@linux.intel.com
State New
Headers
Series iommu/vt-d: Some cleanups |

Commit Message

Baolu Lu Nov. 3, 2022, 5:53 a.m. UTC
  The Intel IOMMU hardware supports blocking DMA transactions by clearing
the translation table entries. This implements a real blocking domain to
avoid using an empty UNMANAGED domain. The detach_dev callback of the
domain ops is not used in any path. Remove it to avoid dead code as well.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/intel/iommu.c | 56 +++++++++++++++++++++++++++++++------
 1 file changed, 48 insertions(+), 8 deletions(-)
  

Comments

Tian, Kevin Nov. 4, 2022, 2:11 a.m. UTC | #1
> From: Lu Baolu <baolu.lu@linux.intel.com>
> Sent: Thursday, November 3, 2022 1:53 PM
> 
> +/*
> + * Clear the page table pointer in context or pasid table entries so that
> + * all DMA requests without PASID from the device are blocked. If the page
> + * table has been set, clean up the data structures.
> + */
> +static void device_block_translation(struct device *dev)

given this helper will be used both by existing paths and the new block
domain, it makes more sense to have it work with existing paths first
i.e. merging with patch3 and then add block domain after.

> +{
> +	struct device_domain_info *info = dev_iommu_priv_get(dev);
> +	struct intel_iommu *iommu = info->iommu;
> +	unsigned long flags;
> +
> +	if (!dev_is_real_dma_subdevice(dev)) {
> +		if (sm_supported(iommu))
> +			intel_pasid_tear_down_entry(iommu, dev,
> +						    PASID_RID2PASID, false);

Since commit 4140d77a, alias devices don't share the pasid table,
which implies that pci_for_each_dma_alias() is required, as done
in domain_context_clear().

> +		else
> +			domain_context_clear(info);
> +	}
> +
> +	if (!info->domain)
> +		return;
> +
> +	spin_lock_irqsave(&info->domain->lock, flags);
> +	list_del(&info->link);
> +	spin_unlock_irqrestore(&info->domain->lock, flags);
> +
> +	domain_detach_iommu(info->domain, iommu);
> +	info->domain = NULL;
> +}
> +
  
Baolu Lu Nov. 5, 2022, 1:54 a.m. UTC | #2
On 2022/11/4 10:11, Tian, Kevin wrote:
>> From: Lu Baolu <baolu.lu@linux.intel.com>
>> Sent: Thursday, November 3, 2022 1:53 PM
>>
>> +/*
>> + * Clear the page table pointer in context or pasid table entries so that
>> + * all DMA requests without PASID from the device are blocked. If the page
>> + * table has been set, clean up the data structures.
>> + */
>> +static void device_block_translation(struct device *dev)
> 
> given this helper will be used both by existing paths and the new block
> domain, it makes more sense to have it work with existing paths first
> i.e. merging with patch3 and then add block domain after.

Yes. Sounds good.

> 
>> +{
>> +	struct device_domain_info *info = dev_iommu_priv_get(dev);
>> +	struct intel_iommu *iommu = info->iommu;
>> +	unsigned long flags;
>> +
>> +	if (!dev_is_real_dma_subdevice(dev)) {
>> +		if (sm_supported(iommu))
>> +			intel_pasid_tear_down_entry(iommu, dev,
>> +						    PASID_RID2PASID, false);
> 
> Since commit 4140d77a, alias devices don't share the pasid table,
> which implies that pci_for_each_dma_alias() is required, as done
> in domain_context_clear().

The PCI alias devices have already been covered by the iommu group
concept in the iommu core. On the contrary, I've been thinking about
retiring pci_for_each_dma_alias() in domain_context_clear().



> 
>> +		else
>> +			domain_context_clear(info);
>> +	}
>> +
>> +	if (!info->domain)
>> +		return;
>> +
>> +	spin_lock_irqsave(&info->domain->lock, flags);
>> +	list_del(&info->link);
>> +	spin_unlock_irqrestore(&info->domain->lock, flags);
>> +
>> +	domain_detach_iommu(info->domain, iommu);
>> +	info->domain = NULL;
>> +}
>> +

Best regards,
baolu
  

Patch

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index e28faba1095f..7374a03cbe27 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -278,6 +278,7 @@  static LIST_HEAD(dmar_satc_units);
 	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
 
 static void dmar_remove_one_dev_info(struct device *dev);
+static void intel_iommu_domain_free(struct iommu_domain *domain);
 
 int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
 int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
@@ -4132,12 +4133,58 @@  static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	return 0;
 }
 
+/*
+ * Clear the page table pointer in context or pasid table entries so that
+ * all DMA requests without PASID from the device are blocked. If the page
+ * table has been set, clean up the data structures.
+ */
+static void device_block_translation(struct device *dev)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	unsigned long flags;
+
+	if (!dev_is_real_dma_subdevice(dev)) {
+		if (sm_supported(iommu))
+			intel_pasid_tear_down_entry(iommu, dev,
+						    PASID_RID2PASID, false);
+		else
+			domain_context_clear(info);
+	}
+
+	if (!info->domain)
+		return;
+
+	spin_lock_irqsave(&info->domain->lock, flags);
+	list_del(&info->link);
+	spin_unlock_irqrestore(&info->domain->lock, flags);
+
+	domain_detach_iommu(info->domain, iommu);
+	info->domain = NULL;
+}
+
+static int blocking_domain_attach_dev(struct iommu_domain *domain,
+				      struct device *dev)
+{
+	device_block_translation(dev);
+	return 0;
+}
+
+static struct iommu_domain blocking_domain = {
+	.ops = &(const struct iommu_domain_ops) {
+		.attach_dev	= blocking_domain_attach_dev,
+		.free		= intel_iommu_domain_free
+	}
+};
+
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
 	struct iommu_domain *domain;
 
 	switch (type) {
+	case IOMMU_DOMAIN_BLOCKED:
+		return &blocking_domain;
 	case IOMMU_DOMAIN_DMA:
 	case IOMMU_DOMAIN_DMA_FQ:
 	case IOMMU_DOMAIN_UNMANAGED:
@@ -4172,7 +4219,7 @@  static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-	if (domain != &si_domain->domain)
+	if (domain != &si_domain->domain && domain != &blocking_domain)
 		domain_exit(to_dmar_domain(domain));
 }
 
@@ -4246,12 +4293,6 @@  static int intel_iommu_attach_device(struct iommu_domain *domain,
 	return domain_add_dev_info(to_dmar_domain(domain), dev);
 }
 
-static void intel_iommu_detach_device(struct iommu_domain *domain,
-				      struct device *dev)
-{
-	dmar_remove_one_dev_info(dev);
-}
-
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
 			   size_t size, int iommu_prot, gfp_t gfp)
@@ -4759,7 +4800,6 @@  const struct iommu_ops intel_iommu_ops = {
 #endif
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev		= intel_iommu_attach_device,
-		.detach_dev		= intel_iommu_detach_device,
 		.map_pages		= intel_iommu_map_pages,
 		.unmap_pages		= intel_iommu_unmap_pages,
 		.iotlb_sync_map		= intel_iommu_iotlb_sync_map,