From: Dave Hansen <dave.hansen@linux.intel.com>
This takes the same logic and approach used for the physical
address limits with x86_phys_bits() and extends them to the virtual
address space.

Introduce a system-wide helper for users to query the size of the
virtual address space: x86_virt_bits().
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---
b/arch/x86/events/amd/brs.c | 2 +-
b/arch/x86/events/amd/lbr.c | 2 +-
b/arch/x86/events/intel/pt.c | 4 ++--
b/arch/x86/include/asm/processor.h | 5 +++++
b/arch/x86/kernel/cpu/proc.c | 2 +-
b/arch/x86/mm/maccess.c | 4 ++--
6 files changed, 12 insertions(+), 7 deletions(-)
On Thu, 2024-02-22 at 10:39 -0800, Dave Hansen wrote:
Reviewed-by: Kai Huang <kai.huang@intel.com>
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -285,7 +285,7 @@ void amd_brs_drain(void)
struct perf_branch_entry *br = cpuc->lbr_entries;
union amd_debug_extn_cfg cfg;
u32 i, nr = 0, num, tos, start;
- u32 shift = 64 - boot_cpu_data.x86_virt_bits;
+ u32 shift = 64 - x86_virt_bits();
/*
* BRS event forced on PMC0,
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -89,7 +89,7 @@ static __always_inline u64 amd_pmu_lbr_g
static __always_inline u64 sign_ext_branch_ip(u64 ip)
{
- u32 shift = 64 - boot_cpu_data.x86_virt_bits;
+ u32 shift = 64 - x86_virt_bits();
return (u64)(((s64)ip << shift) >> shift);
}
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1453,8 +1453,8 @@ static void pt_event_addr_filters_sync(s
* canonical addresses does not affect the result of the
* address filter.
*/
- msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
- msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
+ msr_a = clamp_to_ge_canonical_addr(a, x86_virt_bits());
+ msr_b = clamp_to_le_canonical_addr(b, x86_virt_bits());
if (msr_b < msr_a)
msr_a = msr_b = 0;
}
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -772,4 +772,9 @@ static inline u8 x86_phys_bits(void)
return boot_cpu_data.x86_phys_bits;
}
+static inline u8 x86_virt_bits(void)
+{
+ return boot_cpu_data.x86_virt_bits;
+}
+
#endif /* _ASM_X86_PROCESSOR_H */
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -133,7 +133,7 @@ static int show_cpuinfo(struct seq_file
seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
- x86_phys_bits(), c->x86_virt_bits);
+ x86_phys_bits(), x86_virt_bits());
seq_puts(m, "power management:");
for (i = 0; i < 32; i++) {
--- a/arch/x86/mm/maccess.c
+++ b/arch/x86/mm/maccess.c
@@ -20,10 +20,10 @@ bool copy_from_kernel_nofault_allowed(co
* is initialized. Needed for instruction decoding in early
* exception handlers.
*/
- if (!boot_cpu_data.x86_virt_bits)
+ if (!x86_virt_bits())
return true;
- return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+ return __is_canonical_address(vaddr, x86_virt_bits());
}
#else
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
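
For anyone reviewing the brs.c/lbr.c hunks without the surrounding code
handy: the recorded branch addresses are sign-extended back to canonical
form using a shift of 64 - x86_virt_bits(). Below is a minimal user-space
sketch of that operation, not part of the patch; it hard-codes 48
virtual-address bits purely for illustration where the kernel would call
x86_virt_bits():

#include <stdint.h>
#include <stdio.h>

#define VIRT_BITS 48	/* stand-in for x86_virt_bits() */

/*
 * Mirror sign_ext_branch_ip(): sign-extend a truncated address so the
 * upper (64 - VIRT_BITS) bits replicate bit (VIRT_BITS - 1), making the
 * address canonical again.
 */
static uint64_t sign_ext(uint64_t ip)
{
	uint32_t shift = 64 - VIRT_BITS;

	return (uint64_t)((int64_t)(ip << shift) >> shift);
}

int main(void)
{
	/* A 48-bit kernel address with the upper bits stripped ... */
	uint64_t truncated = 0x0000ffffffff1234ULL;

	/* ... prints 0xffffffffffff1234, the canonical form. */
	printf("%#llx\n", (unsigned long long)sign_ext(truncated));
	return 0;
}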