[v2,1/8] x86/mtrr: split off physical address size calculation

Message ID 20230209072220.6836-2-jgross@suse.com
State New
Headers
Series x86/mtrr: fix handling with PAT but without MTRR

Commit Message

Juergen Gross Feb. 9, 2023, 7:22 a.m. UTC
  Move the calculation of the physical address size in mtrr_bp_init()
into a helper function. This will be needed later.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- new patch
---
 arch/x86/kernel/cpu/mtrr/mtrr.c | 70 ++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 37 deletions(-)
  

Comments

Borislav Petkov Feb. 11, 2023, 10:08 a.m. UTC | #1
On Thu, Feb 09, 2023 at 08:22:13AM +0100, Juergen Gross wrote:
> @@ -654,42 +638,54 @@ void __init mtrr_bp_init(void)
>  			    (boot_cpu_data.x86_stepping == 0x3 ||
>  			     boot_cpu_data.x86_stepping == 0x4))
>  				phys_addr = 36;
> -
> -			size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
> -			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
>  		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
>  			   boot_cpu_data.x86 == 6) {
>  			/*
>  			 * VIA C* family have Intel style MTRRs,
>  			 * but don't support PAE
>  			 */
> -			size_or_mask = SIZE_OR_MASK_BITS(32);
> -			size_and_mask = 0;
>  			phys_addr = 32;
>  		}
> +	}
> +
> +	size_or_mask = ~((1ULL << ((phys_addr) - PAGE_SHIFT)) - 1);

Too many brackets because you've taken the macro and put in the argument
directly.

In any case, reviewing patches which do code movement *and* changes in
the same diff is always unnecessarily nasty. Please do the mechanical
code movement only - cleanups come ontop.

Thx.
  
Juergen Gross Feb. 13, 2023, 6:19 a.m. UTC | #2
On 11.02.23 11:08, Borislav Petkov wrote:
> On Thu, Feb 09, 2023 at 08:22:13AM +0100, Juergen Gross wrote:
>> @@ -654,42 +638,54 @@ void __init mtrr_bp_init(void)
>>   			    (boot_cpu_data.x86_stepping == 0x3 ||
>>   			     boot_cpu_data.x86_stepping == 0x4))
>>   				phys_addr = 36;
>> -
>> -			size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
>> -			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
>>   		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
>>   			   boot_cpu_data.x86 == 6) {
>>   			/*
>>   			 * VIA C* family have Intel style MTRRs,
>>   			 * but don't support PAE
>>   			 */
>> -			size_or_mask = SIZE_OR_MASK_BITS(32);
>> -			size_and_mask = 0;
>>   			phys_addr = 32;
>>   		}
>> +	}
>> +
>> +	size_or_mask = ~((1ULL << ((phys_addr) - PAGE_SHIFT)) - 1);
> 
> Too many brackets because you've taken the macro and put in the argument
> directly.

Oh, yes.

> In any case, reviewing patches which do code movement *and* changes in
> the same diff is always unnecessarily nasty. Please do the mechanical
> code movement only - cleanups come ontop.

Okay.


Juergen
  

Patch

diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 783f3210d582..542ca5639dfd 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -617,27 +617,11 @@  static struct syscore_ops mtrr_syscore_ops = {
 	.resume		= mtrr_restore,
 };
 
-int __initdata changed_by_mtrr_cleanup;
-
-#define SIZE_OR_MASK_BITS(n)  (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
-/**
- * mtrr_bp_init - initialize mtrrs on the boot CPU
- *
- * This needs to be called early; before any of the other CPUs are
- * initialized (i.e. before smp_init()).
- *
- */
-void __init mtrr_bp_init(void)
+static unsigned int __init mtrr_calc_physbits(bool generic)
 {
-	const char *why = "(not available)";
-	u32 phys_addr;
-
-	phys_addr = 32;
+	unsigned int phys_addr = 32;
 
-	if (boot_cpu_has(X86_FEATURE_MTRR)) {
-		mtrr_if = &generic_mtrr_ops;
-		size_or_mask = SIZE_OR_MASK_BITS(36);
-		size_and_mask = 0x00f00000;
+	if (generic) {
 		phys_addr = 36;
 
 		/*
@@ -654,42 +638,54 @@  void __init mtrr_bp_init(void)
 			    (boot_cpu_data.x86_stepping == 0x3 ||
 			     boot_cpu_data.x86_stepping == 0x4))
 				phys_addr = 36;
-
-			size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
-			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
 		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
 			   boot_cpu_data.x86 == 6) {
 			/*
 			 * VIA C* family have Intel style MTRRs,
 			 * but don't support PAE
 			 */
-			size_or_mask = SIZE_OR_MASK_BITS(32);
-			size_and_mask = 0;
 			phys_addr = 32;
 		}
+	}
+
+	size_or_mask = ~((1ULL << ((phys_addr) - PAGE_SHIFT)) - 1);
+	size_and_mask = ~size_or_mask & 0xfffff00000ULL;
+
+	return phys_addr;
+}
+
+int __initdata changed_by_mtrr_cleanup;
+
+/**
+ * mtrr_bp_init - initialize mtrrs on the boot CPU
+ *
+ * This needs to be called early; before any of the other CPUs are
+ * initialized (i.e. before smp_init()).
+ *
+ */
+void __init mtrr_bp_init(void)
+{
+	const char *why = "(not available)";
+	unsigned int phys_addr;
+
+	phys_addr = mtrr_calc_physbits(boot_cpu_has(X86_FEATURE_MTRR));
+
+	if (boot_cpu_has(X86_FEATURE_MTRR)) {
+		mtrr_if = &generic_mtrr_ops;
 	} else {
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_AMD:
-			if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
-				/* Pre-Athlon (K6) AMD CPU MTRRs */
+			/* Pre-Athlon (K6) AMD CPU MTRRs */
+			if (cpu_feature_enabled(X86_FEATURE_K6_MTRR))
 				mtrr_if = &amd_mtrr_ops;
-				size_or_mask = SIZE_OR_MASK_BITS(32);
-				size_and_mask = 0;
-			}
 			break;
 		case X86_VENDOR_CENTAUR:
-			if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
+			if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR))
 				mtrr_if = &centaur_mtrr_ops;
-				size_or_mask = SIZE_OR_MASK_BITS(32);
-				size_and_mask = 0;
-			}
 			break;
 		case X86_VENDOR_CYRIX:
-			if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
+			if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR))
 				mtrr_if = &cyrix_mtrr_ops;
-				size_or_mask = SIZE_OR_MASK_BITS(32);
-				size_and_mask = 0;
-			}
 			break;
 		default:
 			break;