[v5,03/15] x86/mtrr: replace some constants with defines

Message ID 20230401063652.23522-4-jgross@suse.com
State New
Series x86/mtrr: fix handling with PAT but without MTRR

Commit Message

Juergen Gross April 1, 2023, 6:36 a.m. UTC
  Instead of open-coding numeric constants in the MTRR code, use some new #defines.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V5:
- new patch (inspired by a request from Borislav Petkov)
---
 arch/x86/include/asm/mtrr.h        | 25 +++++++++++++--
 arch/x86/kernel/cpu/mtrr/cleanup.c |  2 +-
 arch/x86/kernel/cpu/mtrr/generic.c | 51 +++++++++++++++++-------------
 arch/x86/kernel/cpu/mtrr/mtrr.c    |  2 +-
 4 files changed, 54 insertions(+), 26 deletions(-)
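
For reference, the new names expand to exactly the values of the literals they
replace. A quick user-space sketch of the equivalences, with local stand-ins
for the GENMASK()/BIT_MASK() helpers from <linux/bits.h>:

#include <assert.h>

/* same semantics as the kernel helpers, restricted to 32-bit masks */
#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))
#define BIT_MASK(n)	(1UL << (n))

int main(void)
{
	assert(GENMASK(7, 0) == 0xff);			/* MTRRcap VCNT / MTRRdefType type field */
	assert((BIT_MASK(10) | BIT_MASK(11)) == 0xc00);	/* fixed-enable + enable bits */
	assert(BIT_MASK(11) == 0x800);			/* MTRRphysMask valid bit */
	return 0;
}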
  

Comments

Borislav Petkov April 3, 2023, 4:03 p.m. UTC | #1
On Sat, Apr 01, 2023 at 08:36:40AM +0200, Juergen Gross wrote:
> @@ -643,10 +646,12 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
>  	unsigned int lo, hi;
>  	bool changed = false;
>  
> +#define BASE_MASK	(MTRR_BASE_TYPE_MASK | (size_and_mask << PAGE_SHIFT))
> +#define MASK_MASK	(MTRR_MASK_VALID | (size_and_mask << PAGE_SHIFT))

No, "MASK_MASK" is too much. :-)

Anyway, so I started looking at this and here's what I'm seeing on my
machine even with the old code:

        rdmsr(MTRRphysBase_MSR(index), lo, hi);
        if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
            || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
                (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {


size_and_mask:		0x0000000ffff00000
the shifted version:	0x000000000000ffff

which is bits [15:0]

Now, looking at MTRRphysBase_MSR's definition, the high 32 bits are:

[63 ... reserved ... ][ max_addr ... 32 ]

and that second slice: from max_addr to the 32nd bit of the whole MSR is
the second part of PhysBase.

max_addr aka phys_addr comes from:

	phys_addr = cpuid_eax(0x80000008) & 0xff;

on that machine, that value is 48.

Which means, the physaddr slice should be [48 .. 32], i.e.,

	0x0001ffff00000000

and when you shift that by 32 so that it can be ANDed with the high
portion of the MSR, it becomes

0x000000000001ffff

i.e., bit 16 is set too.

And that max address can be up to 51:

"Range Physical Base-Address (PhysBase)—Bits 51:12. The memory-range base-address in
physical-address space. PhysBase is aligned on a 4-Kbyte (or greater) address in the 52-bit
physical-address space supported by the AMD64 architecture. PhysBase represents the most-
significant 40-address bits of the physical address. Physical-address
bits 11:0 are assumed to be 0."
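
Putting that together with the type field in the low byte, the PhysBase layout
is (a sketch for orientation only):

/*
 * MTRRphysBase_MSR(n), 52-bit maximum physical address:
 *
 *   [63:52] reserved | [51:12] PhysBase | [11:8] reserved | [7:0] Type
 */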

Long story short, size_and_mask needs to be done from

	size_and_mask = ~size_or_mask & 0xfffff00000ULL;

to

	size_and_mask = ~size_or_mask & GENMASK_ULL(phys_addr, 20);

size_or_mask bits already took into consideration the phys_addr:

	size_or_mask = SIZE_OR_MASK_BITS(phys_addr);

and size_and_mask needs to do it too.

Right?

I'll test this on the boxes here everywhere. I guess this gets hit only
on boxes where the phys_addr of the variable MTRRs goes over the 16
bits.

As to this patch: please make all the bit and mask definitions you're
adding as close as possible to the MTRR bit and mask definition names in
the APM+SDM. I've started this already (on top of yours):

---

diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 4e59f7854950..a768a8716980 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -28,7 +28,7 @@
 
 /* Defines for hardware MTRR registers. */
 #define MTRR_CONFIG_NUM_VAR_MASK	GENMASK(7, 0)
-#define MTRR_CONFIG_HAVE_FIXED		BIT_MASK(8)
+#define MTRR_CAP_FIX			BIT_MASK(8)
 #define MTRR_CONFIG_HAVE_WC		BIT_MASK(10)
 
 #define MTRR_DEFTYPE_TYPE_MASK		GENMASK(7, 0)
@@ -39,9 +39,9 @@
 #define MTRR_DEFTYPE_DISABLE_MASK	~(MTRR_DEFTYPE_TYPE_MASK |	\
 					  MTRR_DEFTYPE_ENABLE_MASK)
 
-#define MTRR_BASE_TYPE_MASK		GENMASK_ULL(7, 0)
+#define MTRR_BASE_TYPE_MASK		GENMASK(7, 0)
 
-#define MTRR_MASK_VALID			BIT_ULL_MASK(11)
+#define MTRR_PHYS_MASK_VALID		BIT_ULL_MASK(11)
 
 /*
  * The following functions are for use by other drivers that cannot use
@@ -140,9 +140,7 @@ struct mtrr_gentry32 {
 
 /* Bit fields for enabled in struct mtrr_state_type */
 #define MTRR_STATE_SHIFT		10
-#define MTRR_STATE_MTRR_FIXED_ENABLED	\
-			(MTRR_DEFTYPE_FIXED_ENABLED >> MTRR_STATE_SHIFT)
-#define MTRR_STATE_MTRR_ENABLED		\
-			(MTRR_DEFTYPE_ENABLED >> MTRR_STATE_SHIFT)
+#define MTRR_STATE_MTRR_FIXED_ENABLED	(MTRR_DEFTYPE_FIXED_ENABLED >> MTRR_STATE_SHIFT)
+#define MTRR_STATE_MTRR_ENABLED		(MTRR_DEFTYPE_ENABLED >> MTRR_STATE_SHIFT)
 
 #endif /* _ASM_X86_MTRR_H */
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 9a12da76635c..8f8b7775a5ac 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -171,7 +171,7 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
 	for (i = 0; i < num_var_ranges; ++i) {
 		unsigned short start_state, end_state, inclusive;
 
-		if (!(mtrr_state.var_ranges[i].mask_lo & MTRR_MASK_VALID))
+		if (!(mtrr_state.var_ranges[i].mask_lo & MTRR_PHYS_MASK_VALID))
 			continue;
 
 		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
@@ -223,8 +223,7 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
 		if ((start & mask) != (base & mask))
 			continue;
 
-		curr_match = mtrr_state.var_ranges[i].base_lo &
-			     MTRR_BASE_TYPE_MASK;
+		curr_match = mtrr_state.var_ranges[i].base_lo & MTRR_BASE_TYPE_MASK;
 		if (prev_match == MTRR_TYPE_INVALID) {
 			prev_match = curr_match;
 			continue;
@@ -426,7 +425,7 @@ static void __init print_mtrr_state(void)
 	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
 
 	for (i = 0; i < num_var_ranges; ++i) {
-		if (mtrr_state.var_ranges[i].mask_lo & MTRR_MASK_VALID)
+		if (mtrr_state.var_ranges[i].mask_lo & MTRR_PHYS_MASK_VALID)
 			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
 				 i,
 				 high_width,
@@ -454,7 +453,7 @@ bool __init get_mtrr_state(void)
 	vrs = mtrr_state.var_ranges;
 
 	rdmsr(MSR_MTRRcap, lo, dummy);
-	mtrr_state.have_fixed = !!(lo & MTRR_CONFIG_HAVE_FIXED);
+	mtrr_state.have_fixed = !!(lo & MTRR_CAP_FIX);
 
 	for (i = 0; i < num_var_ranges; i++)
 		get_mtrr_var_range(i, &vrs[i]);
@@ -463,8 +462,7 @@ bool __init get_mtrr_state(void)
 
 	rdmsr(MSR_MTRRdefType, lo, dummy);
 	mtrr_state.def_type = lo & MTRR_DEFTYPE_TYPE_MASK;
-	mtrr_state.enabled = (lo & MTRR_DEFTYPE_ENABLE_MASK) >>
-			     MTRR_STATE_SHIFT;
+	mtrr_state.enabled = (lo & MTRR_DEFTYPE_ENABLE_MASK) >> MTRR_STATE_SHIFT;
 
 	if (amd_special_default_mtrr()) {
 		unsigned low, high;
@@ -576,8 +574,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 	get_cpu();
 
 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
-
-	if ((mask_lo & MTRR_MASK_VALID) == 0) {
+	if (!(mask_lo & MTRR_PHYS_MASK_VALID)) {
 		/*  Invalid (i.e. free) range */
 		*base = 0;
 		*size = 0;
@@ -647,7 +644,7 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
 	bool changed = false;
 
 #define BASE_MASK	(MTRR_BASE_TYPE_MASK | (size_and_mask << PAGE_SHIFT))
-#define MASK_MASK	(MTRR_MASK_VALID | (size_and_mask << PAGE_SHIFT))
+#define MASK_MASK	(MTRR_PHYS_MASK_VALID | (size_and_mask << PAGE_SHIFT))
 
 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
 	if ((vr->base_lo & BASE_MASK) != (lo & BASE_MASK)
@@ -770,7 +767,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
 	} else {
 		vr->base_lo = base << PAGE_SHIFT | type;
 		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
-		vr->mask_lo = -size << PAGE_SHIFT | MTRR_MASK_VALID;
+		vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYS_MASK_VALID;
 		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
 
 		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);

Thx.
  
Juergen Gross April 5, 2023, 7:55 a.m. UTC | #2
On 03.04.23 18:03, Borislav Petkov wrote:
> On Sat, Apr 01, 2023 at 08:36:40AM +0200, Juergen Gross wrote:
>> @@ -643,10 +646,12 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
>>   	unsigned int lo, hi;
>>   	bool changed = false;
>>   
>> +#define BASE_MASK	(MTRR_BASE_TYPE_MASK | (size_and_mask << PAGE_SHIFT))
>> +#define MASK_MASK	(MTRR_MASK_VALID | (size_and_mask << PAGE_SHIFT))
> 
> No, "MASK_MASK" is too much. :-)

Any better suggestion for the name? :-)

> Anyway, so I started looking at this and here's what I'm seeing on my
> machine even with the old code:
> 
>          rdmsr(MTRRphysBase_MSR(index), lo, hi);
>          if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
>              || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
>                  (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
> 
> 
> size_and_mask:		0x0000000ffff00000
> the shifted version:	0x000000000000ffff
> 
> which is bits [15:0]
> 
> Now, looking at MTRRphysBase_MSR's definition, the high 32 bits are:
> 
> [63 ... reserved ... ][ max_addr ... 32 ]
> 
> and that second slice: from max_addr to the 32nd bit of the whole MSR is
> the second part of PhysBase.
> 
> max_addr aka phys_addr comes from:
> 
> 	phys_addr = cpuid_eax(0x80000008) & 0xff;
> 
> on that machine, that value is 48.
> 
> Which means, the physaddr slice should be [48 .. 32], i.e.,
> 
> 	0x0001ffff00000000

No. The "48" is the _number_ of physical address bits, so the 64 bit address
mask will be 0000ffff.ffffffff (48 bits set).

> 
> and when you shift that by 32 so that it can be ANDed with the high
> portion of the MSR, it becomes
> 
> 0x000000000001ffff
> 
> i.e., bit 16 is set too.
> 
> And that max address can be up to 51:
> 
> "Range Physical Base-Address (PhysBase)—Bits 51:12. The memory-range base-address in
> physical-address space. PhysBase is aligned on a 4-Kbyte (or greater) address in the 52-bit
> physical-address space supported by the AMD64 architecture. PhysBase represents the most-
> significant 40-address bits of the physical address. Physical-address
> bits 11:0 are assumed to be 0."
> 
> Long story short, size_and_mask needs to be done from
> 
> 	size_and_mask = ~size_or_mask & 0xfffff00000ULL;
> 
> to
> 
> 	size_and_mask = ~size_or_mask & GENMASK_ULL(phys_addr, 20);

This must be phys_addr - 1, as we start to count with bit 0.
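
A minimal user-space sketch of that off-by-one, with a local stand-in for the
kernel's GENMASK_ULL():

#include <stdio.h>

/* same semantics as the kernel's GENMASK_ULL(h, l): bits h..l set, inclusive */
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	unsigned int phys_addr = 48;	/* CPUID 0x80000008 EAX[7:0] */

	/* 48 address bits -> valid address bits [47:0] */
	printf("%#llx\n", GENMASK_ULL(phys_addr - 1, 0));	/* 0xffffffffffff */
	/* passing phys_addr itself would wrongly include bit 48, too */
	printf("%#llx\n", GENMASK_ULL(phys_addr, 0));		/* 0x1ffffffffffff */
	return 0;
}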

> 
> size_or_mask bits already took into consideration the phys_addr:
> 
> 	size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
> 
> and size_and_mask needs to do it too.
> 
> Right?
> 
> I'll test this on the boxes here everywhere. I guess this gets hit only
> on boxes where the phys_addr of the variable MTRRs goes over the 16
> bits.
> 
> As to this patch: please make all the bit and mask definitions you're
> adding as close as possible to the MTRR bit and mask definition names in
> the APM+SDM. I've started this already (on top of yours):

Okay.


Juergen
  
Borislav Petkov April 5, 2023, 8:26 p.m. UTC | #3
On Wed, Apr 05, 2023 at 09:55:59AM +0200, Juergen Gross wrote:
> On 03.04.23 18:03, Borislav Petkov wrote:
> > On Sat, Apr 01, 2023 at 08:36:40AM +0200, Juergen Gross wrote:
> > > @@ -643,10 +646,12 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
> > >   	unsigned int lo, hi;
> > >   	bool changed = false;
> > > +#define BASE_MASK	(MTRR_BASE_TYPE_MASK | (size_and_mask << PAGE_SHIFT))
> > > +#define MASK_MASK	(MTRR_MASK_VALID | (size_and_mask << PAGE_SHIFT))
> > 
> > No, "MASK_MASK" is too much. :-)
> 
> Any better suggestion for the name? :-)

Looking at this again, what this is actually doing is masking out the
reserved bits. But in an unnecessarily complicated way.

What it should do, instead, is do that explicitly:

	/* Zap the reserved bits and compare only the valid fields: */
	if (((vr->base_lo & ~RESV_LOW) != (lo & ~RESV_LOW)) ||
	    ((vr->base_hi & ~RESV_HI) != (hi & ~RESV_HI)))

where

#define RESV_LOW	GENMASK_ULL(8, 11)
#define RESV_HI		GENMASK(phys_addr - 1, 63)

and then we can get rid of that size_or_mask and size_and_mask
stupidity.

I think that would simplify this variable ranges handling code a lot
more and make it pretty straightforward...
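
A self-contained sketch of that idea for the PhysBase pair (the RSVD names and
the exact hi-word bound are illustrative, not from this thread; GENMASK32()
here mirrors the kernel's GENMASK() with its (high, low) argument order):

#include <stdbool.h>
#include <stdint.h>

#define GENMASK32(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

/* low 32 bits of MTRRphysBase: bits [11:8] are reserved */
#define PHYSBASE_RSVD_LO	GENMASK32(11, 8)

/* high 32 bits: everything at and above bit (phys_addr - 32) is reserved */
static inline uint32_t physbase_rsvd_hi(unsigned int phys_addr)
{
	return GENMASK32(31, phys_addr - 32);
}

/* zap the reserved bits and compare only the valid fields */
static bool physbase_changed(uint32_t vr_lo, uint32_t vr_hi,
			     uint32_t lo, uint32_t hi, unsigned int phys_addr)
{
	uint32_t rsvd_hi = physbase_rsvd_hi(phys_addr);

	return ((vr_lo & ~PHYSBASE_RSVD_LO) != (lo & ~PHYSBASE_RSVD_LO)) ||
	       ((vr_hi & ~rsvd_hi) != (hi & ~rsvd_hi));
}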

> No. The "48" is the _number_ of physical address bits, so the 64 bit address
> mask will be 0000ffff.ffffffff (48 bits set).

Uff, sorry about that. I got confused by that SIZE_OR_MASK_BITS() where
phys_addr and not phys_addr - 1 works, because there the arithmetic
starts counting at bit 0.
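
For context, SIZE_OR_MASK_BITS() in arch/x86/kernel/cpu/mtrr/mtrr.c is (roughly):

#define SIZE_OR_MASK_BITS(n)	(~((1ULL << ((n) - PAGE_SHIFT)) - 1))

/*
 * (1ULL << (n - PAGE_SHIFT)) - 1 sets the low (n - PAGE_SHIFT) bits, i.e. the
 * argument is a count of address bits (in page-frame terms), not the index of
 * the highest valid bit, which is why passing phys_addr rather than
 * phys_addr - 1 is correct there.
 */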
  
Juergen Gross April 11, 2023, 1:32 p.m. UTC | #4
On 05.04.23 22:26, Borislav Petkov wrote:
> On Wed, Apr 05, 2023 at 09:55:59AM +0200, Juergen Gross wrote:
>> On 03.04.23 18:03, Borislav Petkov wrote:
>>> On Sat, Apr 01, 2023 at 08:36:40AM +0200, Juergen Gross wrote:
>>>> @@ -643,10 +646,12 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
>>>>    	unsigned int lo, hi;
>>>>    	bool changed = false;
>>>> +#define BASE_MASK	(MTRR_BASE_TYPE_MASK | (size_and_mask << PAGE_SHIFT))
>>>> +#define MASK_MASK	(MTRR_MASK_VALID | (size_and_mask << PAGE_SHIFT))
>>>
>>> No, "MASK_MASK" is too much. :-)
>>
>> Any better suggestion for the name? :-)
> 
> Looking at this again, what this is actually doing is masking out the
> reserved bits. But in an unnecessarily complicated way.
> 
> What it should do, instead, is do that explicitly:
> 
> 	/* Zap the reserved bits and compare only the valid fields: */
> 	if (((vr->base_lo & ~RESV_LOW) != (lo & ~RESV_LOW)) ||
> 	    ((vr->base_hi & ~RESV_HI) != (hi & ~RESV_HI)))
> 
> where
> 
> #define RESV_LOW	GENMASK_ULL(8, 11)
> #define RESV_HI		GENMASK(phys_addr - 1, 63)
> 
> and then we can get rid of that size_or_mask and size_and_mask
> stupidity.
> 
> I think that would simplify this variable ranges handling code a lot
> more and make it pretty straightforward...

Yeah, probably a good idea.


Juergen
  

Patch

diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index f0eeaf6e5f5f..4e59f7854950 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -23,8 +23,26 @@ 
 #ifndef _ASM_X86_MTRR_H
 #define _ASM_X86_MTRR_H
 
+#include <linux/bits.h>
 #include <uapi/asm/mtrr.h>
 
+/* Defines for hardware MTRR registers. */
+#define MTRR_CONFIG_NUM_VAR_MASK	GENMASK(7, 0)
+#define MTRR_CONFIG_HAVE_FIXED		BIT_MASK(8)
+#define MTRR_CONFIG_HAVE_WC		BIT_MASK(10)
+
+#define MTRR_DEFTYPE_TYPE_MASK		GENMASK(7, 0)
+#define MTRR_DEFTYPE_FIXED_ENABLED	BIT_MASK(10)
+#define MTRR_DEFTYPE_ENABLED		BIT_MASK(11)
+#define MTRR_DEFTYPE_ENABLE_MASK	(MTRR_DEFTYPE_FIXED_ENABLED |	\
+					 MTRR_DEFTYPE_ENABLED)
+#define MTRR_DEFTYPE_DISABLE_MASK	~(MTRR_DEFTYPE_TYPE_MASK |	\
+					  MTRR_DEFTYPE_ENABLE_MASK)
+
+#define MTRR_BASE_TYPE_MASK		GENMASK_ULL(7, 0)
+
+#define MTRR_MASK_VALID			BIT_ULL_MASK(11)
+
 /*
  * The following functions are for use by other drivers that cannot use
  * arch_phys_wc_add and arch_phys_wc_del.
@@ -121,7 +139,10 @@  struct mtrr_gentry32 {
 #endif /* CONFIG_COMPAT */
 
 /* Bit fields for enabled in struct mtrr_state_type */
-#define MTRR_STATE_MTRR_FIXED_ENABLED	0x01
-#define MTRR_STATE_MTRR_ENABLED		0x02
+#define MTRR_STATE_SHIFT		10
+#define MTRR_STATE_MTRR_FIXED_ENABLED	\
+			(MTRR_DEFTYPE_FIXED_ENABLED >> MTRR_STATE_SHIFT)
+#define MTRR_STATE_MTRR_ENABLED		\
+			(MTRR_DEFTYPE_ENABLED >> MTRR_STATE_SHIFT)
 
 #endif /* _ASM_X86_MTRR_H */
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index b5f43049fa5f..ce45d7617874 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -890,7 +890,7 @@  int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 		return 0;
 
 	rdmsr(MSR_MTRRdefType, def, dummy);
-	def &= 0xff;
+	def &= MTRR_DEFTYPE_TYPE_MASK;
 	if (def != MTRR_TYPE_UNCACHABLE)
 		return 0;
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index ee09d359e08f..9a12da76635c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -171,7 +171,7 @@  static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
 	for (i = 0; i < num_var_ranges; ++i) {
 		unsigned short start_state, end_state, inclusive;
 
-		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
+		if (!(mtrr_state.var_ranges[i].mask_lo & MTRR_MASK_VALID))
 			continue;
 
 		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
@@ -223,7 +223,8 @@  static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
 		if ((start & mask) != (base & mask))
 			continue;
 
-		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
+		curr_match = mtrr_state.var_ranges[i].base_lo &
+			     MTRR_BASE_TYPE_MASK;
 		if (prev_match == MTRR_TYPE_INVALID) {
 			prev_match = curr_match;
 			continue;
@@ -425,7 +426,7 @@  static void __init print_mtrr_state(void)
 	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
 
 	for (i = 0; i < num_var_ranges; ++i) {
-		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
+		if (mtrr_state.var_ranges[i].mask_lo & MTRR_MASK_VALID)
 			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
 				 i,
 				 high_width,
@@ -434,7 +435,8 @@  static void __init print_mtrr_state(void)
 				 high_width,
 				 mtrr_state.var_ranges[i].mask_hi,
 				 mtrr_state.var_ranges[i].mask_lo >> 12,
-				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
+				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo &
+						    MTRR_BASE_TYPE_MASK));
 		else
 			pr_debug("  %u disabled\n", i);
 	}
@@ -452,7 +454,7 @@  bool __init get_mtrr_state(void)
 	vrs = mtrr_state.var_ranges;
 
 	rdmsr(MSR_MTRRcap, lo, dummy);
-	mtrr_state.have_fixed = (lo >> 8) & 1;
+	mtrr_state.have_fixed = !!(lo & MTRR_CONFIG_HAVE_FIXED);
 
 	for (i = 0; i < num_var_ranges; i++)
 		get_mtrr_var_range(i, &vrs[i]);
@@ -460,8 +462,9 @@  bool __init get_mtrr_state(void)
 		get_fixed_ranges(mtrr_state.fixed_ranges);
 
 	rdmsr(MSR_MTRRdefType, lo, dummy);
-	mtrr_state.def_type = (lo & 0xff);
-	mtrr_state.enabled = (lo & 0xc00) >> 10;
+	mtrr_state.def_type = lo & MTRR_DEFTYPE_TYPE_MASK;
+	mtrr_state.enabled = (lo & MTRR_DEFTYPE_ENABLE_MASK) >>
+			     MTRR_STATE_SHIFT;
 
 	if (amd_special_default_mtrr()) {
 		unsigned low, high;
@@ -574,7 +577,7 @@  static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 
 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
 
-	if ((mask_lo & 0x800) == 0) {
+	if ((mask_lo & MTRR_MASK_VALID) == 0) {
 		/*  Invalid (i.e. free) range */
 		*base = 0;
 		*size = 0;
@@ -606,7 +609,7 @@  static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 	 */
 	*size = -mask;
 	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
-	*type = base_lo & 0xff;
+	*type = base_lo & MTRR_BASE_TYPE_MASK;
 
 out_put_cpu:
 	put_cpu();
@@ -643,10 +646,12 @@  static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
 	unsigned int lo, hi;
 	bool changed = false;
 
+#define BASE_MASK	(MTRR_BASE_TYPE_MASK | (size_and_mask << PAGE_SHIFT))
+#define MASK_MASK	(MTRR_MASK_VALID | (size_and_mask << PAGE_SHIFT))
+
 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
-	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
-	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
-		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
+	if ((vr->base_lo & BASE_MASK) != (lo & BASE_MASK)
+	    || (vr->base_hi & (BASE_MASK >> 32)) != (hi & (BASE_MASK >> 32))) {
 
 		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
 		changed = true;
@@ -654,9 +659,8 @@  static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
 
 	rdmsr(MTRRphysMask_MSR(index), lo, hi);
 
-	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
-	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
-		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
+	if ((vr->mask_lo & MASK_MASK) != (lo & MASK_MASK)
+	    || (vr->mask_hi & (MASK_MASK >> 32)) != (hi & (MASK_MASK >> 32))) {
 		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
 		changed = true;
 	}
@@ -691,11 +695,13 @@  static unsigned long set_mtrr_state(void)
 	 * Set_mtrr_restore restores the old value of MTRRdefType,
 	 * so to set it we fiddle with the saved value:
 	 */
-	if ((deftype_lo & 0xff) != mtrr_state.def_type
-	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
+	if ((deftype_lo & MTRR_DEFTYPE_TYPE_MASK) != mtrr_state.def_type
+	    || ((deftype_lo & MTRR_DEFTYPE_ENABLE_MASK) >> MTRR_STATE_SHIFT) !=
+	       mtrr_state.enabled) {
 
-		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
-			     (mtrr_state.enabled << 10);
+		deftype_lo = (deftype_lo & MTRR_DEFTYPE_DISABLE_MASK) |
+			     mtrr_state.def_type |
+			     (mtrr_state.enabled << MTRR_STATE_SHIFT);
 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
 	}
 
@@ -708,7 +714,8 @@  void mtrr_disable(void)
 	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 
 	/* Disable MTRRs, and set the default type to uncached */
-	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEFTYPE_DISABLE_MASK,
+		   deftype_hi);
 }
 
 void mtrr_enable(void)
@@ -763,7 +770,7 @@  static void generic_set_mtrr(unsigned int reg, unsigned long base,
 	} else {
 		vr->base_lo = base << PAGE_SHIFT | type;
 		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
-		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
+		vr->mask_lo = -size << PAGE_SHIFT | MTRR_MASK_VALID;
 		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
 
 		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
@@ -817,7 +824,7 @@  static int generic_have_wrcomb(void)
 {
 	unsigned long config, dummy;
 	rdmsr(MSR_MTRRcap, config, dummy);
-	return config & (1 << 10);
+	return config & MTRR_CONFIG_HAVE_WC;
 }
 
 int positive_have_wrcomb(void)
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index ce0b82209ad3..1beb38f7a7a3 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -117,7 +117,7 @@  static void __init set_num_var_ranges(bool use_generic)
 	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
 		config = 8;
 
-	num_var_ranges = config & 0xff;
+	num_var_ranges = config & MTRR_CONFIG_NUM_VAR_MASK;
 }
 
 static void __init init_table(void)