From: Dave Hansen <dave.hansen@linux.intel.com>
Part of the reason that this all turned into such a mess is that there
were no rules around when 'struct cpuinfo_x86' was written. It was
(and is) just a free-for-all.
Establish that 'x86_config' has two phases in its lifetime: a
C_INITIALIZING phase where it can be written and a later C_FINALIZED
phase where it can only be read. This makes it simple to audit that
the state transition happens exactly where the comment says it
should.
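
In sketch form (using the identifiers introduced below), the
lifecycle is:

	/* get_cpu_address_sizes(), possibly called more than once: */
	WARN_ON_ONCE(x86_config.conf_state > C_INITIALIZING);
	x86_config.conf_state = C_INITIALIZING;
	/* ... x86_config fields get written ... */

	/* setup_arch(), exactly once, just after early_cpu_init(): */
	x86_config.conf_state = C_FINALIZED;
	/* ... x86_config is read-only from here on ... */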
Check that the config is C_FINALIZED in each of the wrappers that
read an 'x86_config' value. If something reads too early, stash
some information about the caller so that a warning can be spat
out later.
This goofy stash-then-warn construct is necessary here because any
hapless readers are likely to be in a spot where they cannot easily
WARN() themselves, like the early Xen PV boot that's caused so many
problems.
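
For illustration only, here is a rough user-space analogue of the
stash-then-warn pattern (simplified names, not the kernel code;
__builtin_return_address() is a gcc/clang builtin):

	#include <stdio.h>

	enum state { INITIALIZING, FINALIZED };

	static struct {
		enum state conf_state;
		void *early_reader;
		unsigned char phys_bits;
	} config;

	static void read_config(void)
	{
		if (config.conf_state == FINALIZED)
			return;
		/* Too early; only record the first offender: */
		if (!config.early_reader)
			config.early_reader = __builtin_return_address(0);
	}

	static unsigned char phys_bits(void)
	{
		read_config();
		return config.phys_bits;
	}

	int main(void)
	{
		phys_bits();			/* early read, silently recorded */
		config.conf_state = FINALIZED;	/* "boot" is done */
		/* Now it is safe to complain about the early read: */
		if (config.early_reader)
			printf("early reader: %p\n", config.early_reader);
		return 0;
	}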
This also moves one x86_clflush_size() reference over to the more
direct x86_config.clflush_size because otherwise it would trip
the !C_FINALIZED check.
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---
b/arch/x86/include/asm/processor.h | 22 ++++++++++++++++++++++
b/arch/x86/kernel/cpu/common.c | 5 ++++-
b/arch/x86/kernel/setup.c | 7 +++++++
3 files changed, 33 insertions(+), 1 deletion(-)
@@ -183,6 +183,11 @@ struct x86_addr_config {
u8 min_cache_bits;
};
+enum x86_sys_config_state {
+ C_INITIALIZING,
+ C_FINALIZED
+};
+
/*
* System-wide configuration that is shared by all processors.
*
@@ -190,6 +195,9 @@ struct x86_addr_config {
* modified after that.
*/
struct x86_sys_config {
+ enum x86_sys_config_state conf_state;
+ void *early_reader;
+
/* Address bits supported by all processors */
u8 phys_bits;
u8 virt_bits;
@@ -805,23 +813,37 @@ static inline void weak_wrmsr_fence(void
alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
}
+static inline void read_x86_config(void)
+{
+ if (x86_config.conf_state == C_FINALIZED)
+ return;
+
+ /* Only record the first one: */
+ if (!x86_config.early_reader)
+ x86_config.early_reader = __builtin_return_address(0);
+}
+
static inline u8 x86_phys_bits(void)
{
+ read_x86_config();
return x86_config.phys_bits;
}
static inline u8 x86_virt_bits(void)
{
+ read_x86_config();
return x86_config.virt_bits;
}
static inline u8 x86_cache_bits(void)
{
+ read_x86_config();
return x86_config.cache_bits;
}
static inline u8 x86_clflush_size(void)
{
+ read_x86_config();
return x86_config.clflush_size;
}
@@ -1114,6 +1114,9 @@ void get_cpu_address_sizes(struct cpuinf
u32 eax, ebx, ecx, edx;
bool vp_bits_from_cpuid = true;
+ WARN_ON_ONCE(x86_config.conf_state > C_INITIALIZING);
+ x86_config.conf_state = C_INITIALIZING;
+
if (!cpu_has(c, X86_FEATURE_CPUID) ||
(c->extended_cpuid_level < 0x80000008))
vp_bits_from_cpuid = false;
@@ -1142,7 +1145,7 @@ void get_cpu_address_sizes(struct cpuinf
if (x86_config.cache_bits < bsp_addr_config.min_cache_bits)
x86_config.cache_bits = bsp_addr_config.min_cache_bits;
- x86_config.cache_alignment = x86_clflush_size();
+ x86_config.cache_alignment = x86_config.clflush_size;
if (bsp_addr_config.cache_align_mult)
x86_config.cache_alignment *= bsp_addr_config.cache_align_mult;
@@ -762,7 +762,14 @@ void __init setup_arch(char **cmdline_p)
olpc_ofw_detect();
idt_setup_early_traps();
+
early_cpu_init();
+
+ /* Ensure no readers snuck in before the config was finished: */
+ WARN_ONCE(x86_config.early_reader, "x86_config.early_reader: %pS\n",
+ x86_config.early_reader);
+ x86_config.conf_state = C_FINALIZED;
+
jump_label_init();
static_call_init();
early_ioremap_init();