[RFC,V1,2/5] swiotlb: Allow setting up default alignment of SWIOTLB region

Message ID 20240112055251.36101-3-vannapurve@google.com
State New
Headers
Series x86: CVMs: Align memory conversions to 2M granularity |

Commit Message

Vishal Annapurve Jan. 12, 2024, 5:52 a.m. UTC
  Allow adjusting the alignment of the SWIOTLB memory region. Confidential
VMs (CVMs) can use this hook to force the bounce-buffer region to a larger
alignment (e.g. 2M), so that shared<->private memory conversions can be
done at that granularity.

Signed-off-by: Vishal Annapurve <vannapurve@google.com>
---
 include/linux/swiotlb.h |  5 +++++
 kernel/dma/swiotlb.c    | 12 +++++++++---
 2 files changed, 14 insertions(+), 3 deletions(-)
  

Patch

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 058901313405..450bd82cdb9f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -206,6 +206,7 @@  size_t swiotlb_max_mapping_size(struct device *dev);
 bool is_swiotlb_allocated(void);
 bool is_swiotlb_active(struct device *dev);
 void __init swiotlb_adjust_size(unsigned long size);
+void __init swiotlb_adjust_alignment(unsigned long alignment);
 phys_addr_t default_swiotlb_base(void);
 phys_addr_t default_swiotlb_limit(void);
 #else
@@ -247,6 +248,10 @@  static inline void swiotlb_adjust_size(unsigned long size)
 {
 }
 
+static inline void swiotlb_adjust_alignment(unsigned long alignment)
+{
+}
+
 static inline phys_addr_t default_swiotlb_base(void)
 {
 	return 0;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index a056d2f8b9ee..eeab0607a028 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -97,6 +97,7 @@  static struct io_tlb_mem io_tlb_default_mem;
 #endif	/* CONFIG_SWIOTLB_DYNAMIC */
 
 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
+static unsigned long default_alignment = PAGE_SIZE;
 static unsigned long default_nareas;
 
 /**
@@ -223,6 +224,11 @@  void __init swiotlb_adjust_size(unsigned long size)
 	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
 }
 
+void __init swiotlb_adjust_alignment(unsigned long alignment)
+{
+	default_alignment = alignment;
+}
+
 void swiotlb_print_info(void)
 {
 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
@@ -315,7 +321,7 @@  static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
 		unsigned int flags,
 		int (*remap)(void *tlb, unsigned long nslabs))
 {
-	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
+	size_t bytes = ALIGN(nslabs << IO_TLB_SHIFT, default_alignment);
 	void *tlb;
 
 	/*
@@ -324,9 +330,9 @@  static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
 	 * memory encryption.
 	 */
 	if (flags & SWIOTLB_ANY)
-		tlb = memblock_alloc(bytes, PAGE_SIZE);
+		tlb = memblock_alloc(bytes, default_alignment);
 	else
-		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
+		tlb = memblock_alloc_low(bytes, default_alignment);
 
 	if (!tlb) {
 		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",