[RFC,07/34] x86/mm: Introduce virtual address space limit helper

Message ID 20240222183935.286751FF@davehans-spike.ostc.intel.com
State New
Series x86: Rework system-wide configuration masquerading as per-cpu data

Commit Message

Dave Hansen Feb. 22, 2024, 6:39 p.m. UTC
  From: Dave Hansen <dave.hansen@linux.intel.com>

This uses the same logic and approach that were used for the physical
address limit helper, x86_phys_bits(), and extends them to the virtual
address space.

Introduce a system-wide helper for users to query the size of the
virtual address space: x86_virt_bits().
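
For reference, the new accessor is a thin wrapper around the boot CPU
field, and callers switch from reading boot_cpu_data.x86_virt_bits
directly to calling the helper.  A minimal sketch of the pattern (the
helper body matches the processor.h hunk below; sign_ext_example() is a
hypothetical caller modeled on the amd/lbr.c change, not part of this
patch):

	/* New accessor, as added to <asm/processor.h>: */
	static inline u8 x86_virt_bits(void)
	{
		return boot_cpu_data.x86_virt_bits;
	}

	/* Hypothetical caller, modeled on sign_ext_branch_ip(): */
	static __always_inline u64 sign_ext_example(u64 ip)
	{
		/* Shift out the non-canonical upper bits, then sign-extend. */
		u32 shift = 64 - x86_virt_bits();	/* was: boot_cpu_data.x86_virt_bits */

		return (u64)(((s64)ip << shift) >> shift);
	}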

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---

 b/arch/x86/events/amd/brs.c        |    2 +-
 b/arch/x86/events/amd/lbr.c        |    2 +-
 b/arch/x86/events/intel/pt.c       |    4 ++--
 b/arch/x86/include/asm/processor.h |    5 +++++
 b/arch/x86/kernel/cpu/proc.c       |    2 +-
 b/arch/x86/mm/maccess.c            |    4 ++--
 6 files changed, 12 insertions(+), 7 deletions(-)
  

Comments

Kai Huang Feb. 27, 2024, 11:09 a.m. UTC | #1
On Thu, 2024-02-22 at 10:39 -0800, Dave Hansen wrote:
> From: Dave Hansen <dave.hansen@linux.intel.com>
> 
> This uses the same logic and approach that were used for the physical
> address limit helper, x86_phys_bits(), and extends them to the virtual
> address space.
> 
> Introduce a system-wide helper for users to query the size of the
> virtual address space: x86_virt_bits().
> 
> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
> ---
> 

Reviewed-by: Kai Huang <kai.huang@intel.com>
  

Patch

diff -puN arch/x86/events/amd/brs.c~x86_virt_bits-func arch/x86/events/amd/brs.c
--- a/arch/x86/events/amd/brs.c~x86_virt_bits-func	2024-02-22 10:08:51.528573793 -0800
+++ b/arch/x86/events/amd/brs.c	2024-02-22 10:08:51.536574107 -0800
@@ -285,7 +285,7 @@ void amd_brs_drain(void)
 	struct perf_branch_entry *br = cpuc->lbr_entries;
 	union amd_debug_extn_cfg cfg;
 	u32 i, nr = 0, num, tos, start;
-	u32 shift = 64 - boot_cpu_data.x86_virt_bits;
+	u32 shift = 64 - x86_virt_bits();
 
 	/*
 	 * BRS event forced on PMC0,
diff -puN arch/x86/events/amd/lbr.c~x86_virt_bits-func arch/x86/events/amd/lbr.c
--- a/arch/x86/events/amd/lbr.c~x86_virt_bits-func	2024-02-22 10:08:51.528573793 -0800
+++ b/arch/x86/events/amd/lbr.c	2024-02-22 10:08:51.536574107 -0800
@@ -89,7 +89,7 @@ static __always_inline u64 amd_pmu_lbr_g
 
 static __always_inline u64 sign_ext_branch_ip(u64 ip)
 {
-	u32 shift = 64 - boot_cpu_data.x86_virt_bits;
+	u32 shift = 64 - x86_virt_bits();
 
 	return (u64)(((s64)ip << shift) >> shift);
 }
diff -puN arch/x86/events/intel/pt.c~x86_virt_bits-func arch/x86/events/intel/pt.c
--- a/arch/x86/events/intel/pt.c~x86_virt_bits-func	2024-02-22 10:08:51.528573793 -0800
+++ b/arch/x86/events/intel/pt.c	2024-02-22 10:08:51.536574107 -0800
@@ -1453,8 +1453,8 @@ static void pt_event_addr_filters_sync(s
 			 * canonical addresses does not affect the result of the
 			 * address filter.
 			 */
-			msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
-			msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
+			msr_a = clamp_to_ge_canonical_addr(a, x86_virt_bits());
+			msr_b = clamp_to_le_canonical_addr(b, x86_virt_bits());
 			if (msr_b < msr_a)
 				msr_a = msr_b = 0;
 		}
diff -puN arch/x86/include/asm/processor.h~x86_virt_bits-func arch/x86/include/asm/processor.h
--- a/arch/x86/include/asm/processor.h~x86_virt_bits-func	2024-02-22 10:08:51.532573950 -0800
+++ b/arch/x86/include/asm/processor.h	2024-02-22 10:08:51.536574107 -0800
@@ -772,4 +772,9 @@ static inline u8 x86_phys_bits(void)
 	return boot_cpu_data.x86_phys_bits;
 }
 
+static inline u8 x86_virt_bits(void)
+{
+	return boot_cpu_data.x86_virt_bits;
+}
+
 #endif /* _ASM_X86_PROCESSOR_H */
diff -puN arch/x86/kernel/cpu/proc.c~x86_virt_bits-func arch/x86/kernel/cpu/proc.c
--- a/arch/x86/kernel/cpu/proc.c~x86_virt_bits-func	2024-02-22 10:08:51.532573950 -0800
+++ b/arch/x86/kernel/cpu/proc.c	2024-02-22 10:08:51.536574107 -0800
@@ -133,7 +133,7 @@ static int show_cpuinfo(struct seq_file
 	seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
 	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
 	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
-		   x86_phys_bits(), c->x86_virt_bits);
+		   x86_phys_bits(), x86_virt_bits());
 
 	seq_puts(m, "power management:");
 	for (i = 0; i < 32; i++) {
diff -puN arch/x86/mm/maccess.c~x86_virt_bits-func arch/x86/mm/maccess.c
--- a/arch/x86/mm/maccess.c~x86_virt_bits-func	2024-02-22 10:08:51.536574107 -0800
+++ b/arch/x86/mm/maccess.c	2024-02-22 10:08:51.536574107 -0800
@@ -20,10 +20,10 @@ bool copy_from_kernel_nofault_allowed(co
 	 * is initialized.  Needed for instruction decoding in early
 	 * exception handlers.
 	 */
-	if (!boot_cpu_data.x86_virt_bits)
+	if (!x86_virt_bits())
 		return true;
 
-	return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+	return __is_canonical_address(vaddr, x86_virt_bits());
 }
 #else
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)