[v2,3/3] iommu/vt-d: debugfs: Support dumping a specified page table

Message ID 20230922151636.77139-4-Jingqi.liu@intel.com
State New
Headers
Series iommu/vt-d: debugfs: Enhancements to IOMMU debugfs |

Commit Message

Liu, Jingqi Sept. 22, 2023, 3:16 p.m. UTC
  The original debugfs code only dumps page tables that are not associated
with a PASID. With PASID supported, page tables associated with a PASID
also need to be dumped.

This patch supports dumping a specified page table in legacy mode or
scalable mode with or without a specified pasid.

For legacy mode, according to bus number and DEVFN, traverse the root
table and context table to get the pointer of page table in the
context table entry, then dump the specified page table.

For scalable mode, according to bus number, DEVFN and pasid, traverse
the root table, context table, pasid directory and pasid table to get
the pointer of page table in the pasid table entry, then dump the
specified page table.

Examples are as follows:
1) Dump the page table of device "0000:00:1f.0" that only supports
   legacy mode.
   $ sudo cat
   /sys/kernel/debug/iommu/intel/0000:00:1f.0/0/domain_translation_struct

2) Dump the page table of device "0000:00:0a.0" with PASID "1" that
   supports scalable mode.
   $ sudo cat
   /sys/kernel/debug/iommu/intel/0000:00:0a.0/1/domain_translation_struct

Suggested-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jingqi Liu <Jingqi.liu@intel.com>
---
 drivers/iommu/intel/debugfs.c | 163 +++++++++++++++++++++++++---------
 1 file changed, 121 insertions(+), 42 deletions(-)
  

Patch

diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index 9128febba3c6..51f0e022c06e 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -352,58 +352,137 @@  static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
 	}
 }
 
-static int __show_device_domain_translation(struct device *dev, void *data)
+static int domain_translation_struct_show(struct seq_file *m, void *unused)
 {
-	struct dmar_domain *domain;
-	struct seq_file *m = data;
-	u64 path[6] = { 0 };
+	struct device_domain_info *info;
+	struct show_domain_info *sinfo;
+	bool scalable, found = false;
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	u16 devfn, bus, seg;
 
-	domain = to_dmar_domain(iommu_get_domain_for_dev(dev));
-	if (!domain)
-		return 0;
+	if (!m || !m->private) {
+		seq_puts(m, "Invalid device or pasid!\n");
+		return -EINVAL;
+	}
 
-	seq_printf(m, "Device %s @0x%llx\n", dev_name(dev),
-		   (u64)virt_to_phys(domain->pgd));
-	seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
+	sinfo = (struct show_domain_info*)m->private;
+	if (!sinfo->dev ||
+	    !dev_iommu_priv_get(sinfo->dev) ||
+	    (sinfo->pasid == IOMMU_PASID_INVALID)) {
+		seq_puts(m, "Please specify device or pasid!\n");
+		return -ENODEV;
+	}
 
-	pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
-	seq_putc(m, '\n');
+	info = dev_iommu_priv_get(sinfo->dev);
+	bus = info->bus;
+	devfn = info->devfn;
+	seg = info->segment;
 
-	/* Don't iterate */
-	return 1;
-}
+	rcu_read_lock();
+	for_each_active_iommu(iommu, drhd) {
+		struct context_entry *context;
+		u64 pgd, path[6] = { 0 };
+		u32 sts, agaw;
 
-static int show_device_domain_translation(struct device *dev, void *data)
-{
-	struct iommu_group *group;
+		if (seg != iommu->segment)
+			continue;
 
-	device_lock(dev);
-	group = iommu_group_get(dev);
-	device_unlock(dev);
-	if (!group)
-		return 0;
+		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+		if (!(sts & DMA_GSTS_TES)) {
+			seq_printf(m, "DMA Remapping is not enabled on %s\n",
+				   iommu->name);
+			continue;
+		}
+		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT)
+			scalable = true;
+		else
+			scalable = false;
 
-	/*
-	 * The group->mutex is held across the callback, which will
-	 * block calls to iommu_attach/detach_group/device. Hence,
-	 * the domain of the device will not change during traversal.
-	 *
-	 * All devices in an iommu group share a single domain, hence
-	 * we only dump the domain of the first device. Even though,
-	 * this code still possibly races with the iommu_unmap()
-	 * interface. This could be solved by RCU-freeing the page
-	 * table pages in the iommu_unmap() path.
-	 */
-	iommu_group_for_each_dev(group, data, __show_device_domain_translation);
-	iommu_group_put(group);
+		/*
+		 * The iommu->lock is held across the callback, which will
+		 * block calls to domain_attach/domain_detach. Hence,
+		 * the domain of the device will not change during traversal.
+		 *
+		 * Traversing page table possibly races with the iommu_unmap()
+		 * interface. This could be solved by RCU-freeing the page
+		 * table pages in the iommu_unmap() path.
+		 */
+		spin_lock(&iommu->lock);
 
-	return 0;
-}
+		context = iommu_context_addr(iommu, bus, devfn, 0);
+		if (!context || !context_present(context))
+			goto iommu_unlock;
 
-static int domain_translation_struct_show(struct seq_file *m, void *unused)
-{
-	return bus_for_each_dev(&pci_bus_type, NULL, m,
-				show_device_domain_translation);
+		if (scalable) {	/* scalable mode */
+			struct pasid_dir_entry *dir_tbl, *dir_entry;
+			struct pasid_entry *pasid_tbl, *pasid_tbl_entry;
+			u16 pasid_dir_size, dir_idx, tbl_idx, pgtt;
+			u64 pasid_dir_ptr;
+
+			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
+			pasid_dir_size = get_pasid_dir_size(context);
+
+			/* Dump specified device domain mappings with PASID. */
+			dir_idx = sinfo->pasid >> PASID_PDE_SHIFT;
+			tbl_idx = sinfo->pasid & PASID_PTE_MASK;
+
+			dir_tbl = phys_to_virt(pasid_dir_ptr);
+			dir_entry = &dir_tbl[dir_idx];
+
+			pasid_tbl = get_pasid_table_from_pde(dir_entry);
+			if (!pasid_tbl)
+				goto iommu_unlock;
+
+			pasid_tbl_entry = &pasid_tbl[tbl_idx];
+			if (!pasid_pte_is_present(pasid_tbl_entry))
+				goto iommu_unlock;
+
+			/*
+			 * According to PASID Granular Translation Type(PGTT),
+			 * get the page table pointer.
+			 */
+			pgtt = (u16)(pasid_tbl_entry->val[0] & GENMASK_ULL(8, 6)) >> 6;
+			agaw = (u8)(pasid_tbl_entry->val[0] & GENMASK_ULL(4, 2)) >> 2;
+
+			switch (pgtt) {
+				case PASID_ENTRY_PGTT_FL_ONLY:
+					pgd = pasid_tbl_entry->val[2];
+					break;
+				case PASID_ENTRY_PGTT_SL_ONLY:
+				case PASID_ENTRY_PGTT_NESTED:
+					pgd = pasid_tbl_entry->val[0];
+					break;
+				default:
+					goto iommu_unlock;
+			}
+			pgd &= VTD_PAGE_MASK;
+		} else { /* legacy mode */
+			pgd = context->lo & VTD_PAGE_MASK;
+			agaw = context->hi & 7;
+		}
+
+		seq_printf(m, "Device %04x:%02x:%02x.%x ",
+			   iommu->segment, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+		if (scalable)
+			seq_printf(m, "with pasid %x @0x%llx\n", sinfo->pasid, pgd);
+		else
+			seq_printf(m, "@0x%llx\n", pgd);
+
+		seq_printf(m, "%-17s\t%-18s\t%-18s\t%-18s\t%-18s\t%-s\n",
+			   "IOVA_PFN", "PML5E", "PML4E", "PDPE", "PDE", "PTE");
+		pgtable_walk_level(m, phys_to_virt(pgd), agaw + 2, 0, path);
+
+		found = true;
+iommu_unlock:
+		spin_unlock(&iommu->lock);
+		if (found)
+			break;
+	}
+	rcu_read_unlock();
+
+	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);