[RFC,09/18] intel-iommu: Use pkernfs for root/context pgtable pages

Message ID 20240205120203.60312-10-jgowans@amazon.com
State New
Series Pkernfs: Support persistence for live update

Commit Message

Gowans, James Feb. 5, 2024, 12:01 p.m. UTC
  The previous commits were preparation for using pkernfs memory for IOMMU
pgtables: a file in the filesystem is available, along with an allocator
that hands out 4-KiB pages from that file.

Now use those to actually back the root and context pgtable pages with
pkernfs memory. If pkernfs is enabled then a "region" (a chunk of
physical and virtual memory) is fetched from pkernfs and used to drive
the allocator. Should this rather just be a pointer to a pkernfs inode?
That abstraction seems leaky, but without the ability to store struct
files at this point it is probably the more appropriate choice.

The freeing still needs to be hooked into the allocator...
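
For readers following along, here is a rough sketch of the shape of
allocation this patch assumes: a region with known virtual and physical
bases, carved into 4-KiB pages by a simple bump allocator. The struct
layout and helper below are illustrative only; the real struct
pkernfs_region and iommu_alloc_page_from_region() are introduced by the
earlier patches in this series and differ in detail.

/*
 * Sketch only: illustrative layout of a pkernfs-backed region and a
 * bump allocator handing out zeroed 4-KiB pages from it. Field and
 * function names are made up for illustration.
 */
#include <linux/errno.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/types.h>

struct pkernfs_region_sketch {
	void		*vaddr;	/* kernel virtual base of the region */
	phys_addr_t	paddr;	/* physical base of the region */
	size_t		bytes;	/* size of the region */
	size_t		next;	/* offset of the next free 4-KiB page */
};

/*
 * Hand out one zeroed 4-KiB page, returning the virtual address (for
 * the kernel to write entries into) and the physical address (for the
 * IOMMU hardware to walk).
 */
static int sketch_alloc_pgtable_page(struct pkernfs_region_sketch *r,
				     void **vaddr, phys_addr_t *paddr)
{
	if (r->next + SZ_4K > r->bytes)
		return -ENOMEM;
	if (vaddr)
		*vaddr = r->vaddr + r->next;
	if (paddr)
		*paddr = r->paddr + r->next;
	memset(r->vaddr + r->next, 0, SZ_4K);
	r->next += SZ_4K;
	return 0;
}

The point of drawing pgtable pages from such a region rather than from
the normal page allocator is that they live in pkernfs and so persist
across a live-update kexec, which is what lets the liveupdate path in
init_dmars() below skip the disable/copy handling for pre-enabled
translation and restore the existing pgtables instead.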
---
 drivers/iommu/intel/iommu.c | 24 ++++++++++++++++++++----
 drivers/iommu/intel/iommu.h |  2 ++
 2 files changed, 22 insertions(+), 4 deletions(-)
  

Patch

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 744e4e6b8d72..2dd3f055dbce 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -19,6 +19,7 @@ 
 #include <linux/memory.h>
 #include <linux/pci.h>
 #include <linux/pci-ats.h>
+#include <linux/pkernfs.h>
 #include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 #include <linux/tboot.h>
@@ -28,6 +29,7 @@ 
 #include "../dma-iommu.h"
 #include "../irq_remapping.h"
 #include "../iommu-sva.h"
+#include "../pgtable_alloc.h"
 #include "pasid.h"
 #include "cap_audit.h"
 #include "perfmon.h"
@@ -617,7 +619,12 @@  struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
 		if (!alloc)
 			return NULL;
 
-		context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
+		if (pkernfs_enabled())
+			iommu_alloc_page_from_region(
+				&iommu->pkernfs_region,
+				(void **) &context, NULL);
+		else
+			context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
 		if (!context)
 			return NULL;
 
@@ -1190,7 +1197,15 @@  static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 {
 	struct root_entry *root;
 
-	root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
+	if (pkernfs_enabled()) {
+		pkernfs_alloc_iommu_root_pgtables(&iommu->pkernfs_region);
+		root = pgtable_get_root_page(
+				&iommu->pkernfs_region,
+				liveupdate);
+	} else {
+		root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
+	}
+
 	if (!root) {
 		pr_err("Allocating root entry for %s failed\n",
 			iommu->name);
@@ -2790,7 +2805,7 @@  static int __init init_dmars(void)
 
 		init_translation_status(iommu);
 
-		if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
+		if (translation_pre_enabled(iommu) && !is_kdump_kernel() && !liveupdate) {
 			iommu_disable_translation(iommu);
 			clear_translation_pre_enabled(iommu);
 			pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
@@ -2806,7 +2821,8 @@  static int __init init_dmars(void)
 		if (ret)
 			goto free_iommu;
 
-		if (translation_pre_enabled(iommu)) {
+		/* For the live update case restore pgtables, don't copy */
+		if (translation_pre_enabled(iommu) && !liveupdate) {
 			pr_info("Translation already enabled - trying to copy translation structures\n");
 
 			ret = copy_translation_tables(iommu);
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index e6a3e7065616..a2338e398ba3 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -22,6 +22,7 @@ 
 #include <linux/bitfield.h>
 #include <linux/xarray.h>
 #include <linux/perf_event.h>
+#include <linux/pkernfs.h>
 
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
@@ -672,6 +673,7 @@  struct intel_iommu {
 	unsigned long	*copied_tables; /* bitmap of copied tables */
 	spinlock_t	lock; /* protect context, domain ids */
 	struct root_entry *root_entry; /* virtual address */
+	struct pkernfs_region pkernfs_region;
 
 	struct iommu_flush flush;
 #endif