Modern CPUs all share the same MTRR interface implemented via
generic_mtrr_ops.
In several places the MTRR code deduces the use of this generic
interface via is_cpu(INTEL) tests, which work only because
X86_VENDOR_INTEL is 0 (the is_cpu() macro tests mtrr_if->vendor, which
isn't explicitly set in generic_mtrr_ops and thus defaults to 0).
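
For reference, this is the construct in question (the macro from
mtrr.h plus the vendor constant from <asm/processor.h>; quoted here
only to illustrate the implicit 0):

  #define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)

  #define X86_VENDOR_INTEL	0

So is_cpu(INTEL) matches any mtrr_ops instance which leaves .vendor
unset, generic_mtrr_ops included.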
Test the generic CPU feature X86_FEATURE_MTRR instead.
The only other place where the .vendor member of struct mtrr_ops is
used is set_num_var_ranges(), where the number of MTRR registers is
determined depending on the vendor. This can easily be changed by
replacing .vendor with the static number of MTRR registers.
It should be noted that the "is_cpu(HYGON)" test never returned true,
as no struct mtrr_ops instance carries that vendor information (see the
listing below).
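
For completeness, the .vendor values of all mtrr_ops instances in the
tree (abridged; note the absence of a HYGON entry):

  amd_mtrr_ops.vendor     = X86_VENDOR_AMD
  centaur_mtrr_ops.vendor = X86_VENDOR_CENTAUR
  cyrix_mtrr_ops.vendor   = X86_VENDOR_CYRIX
  generic_mtrr_ops.vendor = 0	/* never set explicitly */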
Signed-off-by: Juergen Gross <jgross@suse.com>
Tested-by: Michael Kelley <mikelley@microsoft.com>
---
V3:
- new patch
V4:
- use cpu_feature_enabled(X86_FEATURE_MTRR) for testing generic MTRRs
  (Boris Petkov)
V6:
- eliminate set_num_var_ranges() (Boris Petkov)
---
arch/x86/kernel/cpu/mtrr/amd.c | 2 +-
arch/x86/kernel/cpu/mtrr/centaur.c | 2 +-
arch/x86/kernel/cpu/mtrr/cleanup.c | 4 ++--
arch/x86/kernel/cpu/mtrr/cyrix.c | 2 +-
arch/x86/kernel/cpu/mtrr/generic.c | 2 +-
arch/x86/kernel/cpu/mtrr/mtrr.c | 24 ++++++++----------------
arch/x86/kernel/cpu/mtrr/mtrr.h | 4 +---
7 files changed, 15 insertions(+), 25 deletions(-)
On Tue, May 02, 2023 at 02:09:21PM +0200, Juergen Gross wrote:
> Modern CPUs all share the same MTRR interface implemented via
> generic_mtrr_ops.
>
> In several places the MTRR code deduces the use of this generic
> interface via is_cpu(INTEL) tests, which work only because
> X86_VENDOR_INTEL is 0 (the is_cpu() macro tests mtrr_if->vendor, which
> isn't explicitly set in generic_mtrr_ops and thus defaults to 0).
>
> Test the generic CPU feature X86_FEATURE_MTRR instead.
Doesn't work for the MTRRs-disabled case: mtrr_trim_uncached_memory()
gets called directly, where mtrr_if can be NULL.
[ 0.006508] MTRRs disabled by BIOS
[ 0.006522] x86/PAT: Configuration [0-7]: WB WC UC- UC WB WP UC- WT
[ 0.006597] !mtrr_if
[ 0.006609] CPU: 0 PID: 0 Comm: swapper Not tainted 6.4.0-rc1+ #1
[ 0.006626] Hardware name: Acer AOA150/, BIOS v0.3309 10/06/2008
[ 0.006638] Call Trace:
[ 0.006663] dump_stack_lvl+0x6a/0xdc
[ 0.006697] dump_stack+0xd/0x14
[ 0.006719] mtrr_trim_uncached_memory+0x317/0x440
[ 0.006761] ? __this_cpu_preempt_check+0xf/0x1c
[ 0.006814] ? _raw_spin_unlock+0x27/0x50
[ 0.006835] ? cache_enable+0x3d/0x44
[ 0.006869] setup_arch+0x746/0xcac
[ 0.006887] ? vprintk_default+0x15/0x1c
[ 0.006922] ? vprintk+0x55/0x64
[ 0.006953] start_kernel+0x56/0x630
[ 0.006974] ? idt_setup_early_handler+0x82/0x9c
[ 0.007007] i386_start_kernel+0x48/0x48
[ 0.007027] startup_32_smp+0x156/0x158
I've done this on top. mtrr_enabled() must now be checked before a
caller does any mtrr_if-> accesses.
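
IOW, every mtrr_if user must now follow this pattern (a sketch, not a
literal call site; reg/base/size/type stand for whatever the caller
has at hand):

	if (!mtrr_enabled())
		return 0;			/* no mtrr_if set up, bail out */

	mtrr_if->set(reg, base, size, type);	/* safe: mtrr_if != NULL here */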
And yes, the MTRR code needs more scrubbing.
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 4a979086f00e..ed5f84c20ac2 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -689,6 +689,9 @@ int __init mtrr_cleanup(void)
int index_good;
int i;
+ if (!mtrr_enabled())
+ return 0;
+
if (!cpu_feature_enabled(X86_FEATURE_MTRR) || enable_mtrr_cleanup < 1)
return 0;
@@ -882,6 +885,9 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
/* extra one for all 0 */
int num[MTRR_NUM_TYPES + 1];
+ if (!mtrr_enabled())
+ return 0;
+
/*
* Make sure we only trim uncachable memory on machines that
* support the Intel MTRR architecture:
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index bcde9a754f62..ec8670bb5d88 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -59,10 +59,6 @@
#define MTRR_TO_PHYS_WC_OFFSET 1000
u32 num_var_ranges;
-static bool mtrr_enabled(void)
-{
- return !!mtrr_if;
-}
unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
DEFINE_MUTEX(mtrr_mutex);
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 81babff29c59..4463a8db6f4e 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -80,3 +80,12 @@ extern const struct mtrr_ops centaur_mtrr_ops;
extern int changed_by_mtrr_cleanup;
extern int mtrr_cleanup(void);
void generic_rebuild_map(void);
+
+/*
+ * Must be used by code which uses mtrr_if to call platform-specific
+ * MTRR manipulation functions.
+ */
+static inline bool mtrr_enabled(void)
+{
+ return !!mtrr_if;
+}
--- a/arch/x86/kernel/cpu/mtrr/amd.c
+++ b/arch/x86/kernel/cpu/mtrr/amd.c
@@ -110,7 +110,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
}
const struct mtrr_ops amd_mtrr_ops = {
- .vendor = X86_VENDOR_AMD,
+ .var_regs = 2,
.set = amd_set_mtrr,
.get = amd_get_mtrr,
.get_free_region = generic_get_free_region,
--- a/arch/x86/kernel/cpu/mtrr/centaur.c
+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
@@ -112,7 +112,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
}
const struct mtrr_ops centaur_mtrr_ops = {
- .vendor = X86_VENDOR_CENTAUR,
+ .var_regs = 8,
.set = centaur_set_mcr,
.get = centaur_get_mcr,
.get_free_region = centaur_get_free_region,
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -689,7 +689,7 @@ int __init mtrr_cleanup(void)
int index_good;
int i;
- if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
+ if (!cpu_feature_enabled(X86_FEATURE_MTRR) || enable_mtrr_cleanup < 1)
return 0;
rdmsr(MSR_MTRRdefType, def, dummy);
@@ -886,7 +886,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
* Make sure we only trim uncachable memory on machines that
* support the Intel MTRR architecture:
*/
- if (!is_cpu(INTEL) || disable_mtrr_trim)
+ if (!cpu_feature_enabled(X86_FEATURE_MTRR) || disable_mtrr_trim)
return 0;
rdmsr(MSR_MTRRdefType, def, dummy);
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -235,7 +235,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
}
const struct mtrr_ops cyrix_mtrr_ops = {
- .vendor = X86_VENDOR_CYRIX,
+ .var_regs = 8,
.set = cyrix_set_arr,
.get = cyrix_get_arr,
.get_free_region = cyrix_get_free_region,
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -853,7 +853,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
* For Intel PPro stepping <= 7
* must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
*/
- if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
+ if (mtrr_if == &generic_mtrr_ops && boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 1 &&
boot_cpu_data.x86_stepping <= 7) {
if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -103,21 +103,6 @@ static int have_wrcomb(void)
return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}
-/* This function returns the number of variable MTRRs */
-static void __init set_num_var_ranges(bool use_generic)
-{
- unsigned long config = 0, dummy;
-
- if (use_generic)
- rdmsr(MSR_MTRRcap, config, dummy);
- else if (is_cpu(AMD) || is_cpu(HYGON))
- config = 2;
- else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
- config = 8;
-
- num_var_ranges = config & MTRR_CAP_VCNT;
-}
-
static void __init init_table(void)
{
int i, max;
@@ -627,6 +612,7 @@ void __init mtrr_bp_init(void)
{
bool generic_mtrrs = cpu_feature_enabled(X86_FEATURE_MTRR);
const char *why = "(not available)";
+ unsigned long config, dummy;
mtrr_set_mask();
@@ -664,7 +650,13 @@ void __init mtrr_bp_init(void)
}
if (mtrr_enabled()) {
- set_num_var_ranges(mtrr_if == &generic_mtrr_ops);
+ /* Get the number of variable MTRR ranges. */
+ if (mtrr_if == &generic_mtrr_ops)
+ rdmsr(MSR_MTRRcap, config, dummy);
+ else
+ config = mtrr_if->var_regs;
+ num_var_ranges = config & MTRR_CAP_VCNT;
+
init_table();
if (mtrr_if == &generic_mtrr_ops) {
/* BIOS may override */
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -13,7 +13,7 @@
extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
struct mtrr_ops {
- u32 vendor;
+ u32 var_regs;
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
void (*get)(unsigned int reg, unsigned long *base,
@@ -53,8 +53,6 @@ bool get_mtrr_state(void);
extern const struct mtrr_ops *mtrr_if;
-#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
-
extern unsigned int num_var_ranges;
extern u64 mtrr_tom2;
extern struct mtrr_state_type mtrr_state;