[tip: ras/core] x86/mce: Add support for Extended Physical Address MCA changes
The following commit has been merged into the ras/core branch of tip:

Commit-ID:     fcd343a285cb41894a7bd02dbd675042d394758d
Gitweb:        https://git.kernel.org/tip/fcd343a285cb41894a7bd02dbd675042d394758d
Author:        Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
AuthorDate:    Tue, 06 Dec 2022 11:36:07 -06:00
Committer:     Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Wed, 28 Dec 2022 22:37:37 +01:00
x86/mce: Add support for Extended Physical Address MCA changes

Newer AMD CPUs support more physical address bits.

That is, the MCA_ADDR registers on Scalable MCA systems contain the
ErrorAddr in bits [56:0] instead of [55:0]. Hence, the existing LSB field
from bits [61:56] in MCA_ADDR must be moved around to accommodate the
larger ErrorAddr size.

MCA_CONFIG[McaLsbInStatusSupported] indicates this change. If set, the
LSB field will be found in MCA_STATUS rather than MCA_ADDR.
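
To make the layout change concrete, here is a minimal sketch of the
resulting extraction (not the patch itself; the helper name is
illustrative, the bit positions are the ones described above, and
GENMASK_ULL() is the kernel's bit-range mask macro from <linux/bits.h>):

  /* LSB field: MCA_STATUS[29:24] if McaLsbInStatusSupported, else MCA_ADDR[61:56]. */
  static inline u64 extract_err_addr(u64 status, u64 addr, bool lsb_in_status)
  {
  	u8 lsb;

  	if (lsb_in_status) {
  		lsb = (status >> 24) & 0x3f;
  		return addr & GENMASK_ULL(56, lsb);	/* ErrorAddr in bits [56:0] */
  	}

  	lsb = (addr >> 56) & 0x3f;
  	return addr & GENMASK_ULL(55, lsb);		/* ErrorAddr in bits [55:0] */
  }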

Each logical CPU has its own set of MCA banks in hardware; these banks are
not shared with other logical CPUs. Additionally, on SMCA systems, each
feature bit may be different for each bank within the same logical CPU.

Therefore, check MCA_CONFIG[McaLsbInStatusSupported] for each MCA bank and
for each CPU.
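
In the kernel this boils down to a per-bank, per-CPU probe along the lines
of the sketch below (the patch does this inside smca_configure() while the
bank is being set up; "bank" is the bank number, and bit 8 of the
MCA_CONFIG low half is McaLsbInStatusSupported, as used in the diff):

  	u32 low, high;

  	/* MCA_CONFIG is a per-bank MSR; low bit 8 is McaLsbInStatusSupported. */
  	if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
  		this_cpu_ptr(mce_banks_array)[bank].lsb_in_status = !!(low & BIT(8));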

Additionally, not all MCA banks support the maximum ErrorAddr width in
MCA_ADDR. Some banks might support fewer address bits, with the remaining
bits marked as reserved.
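
For example (illustrative numbers), a bank whose least significant valid
address bit is 6 reports addresses at 64-byte (cache line) granularity;
with McaLsbInStatusSupported set, the masking above becomes:

  	u8  lsb  = 6;				/* example value read from the LSB field */
  	u64 mask = GENMASK_ULL(56, lsb);	/* 0x01ffffffffffffc0: keep address bits [56:6] */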

[ Yazen: Rebased and fixed up formatting.
  bp: Massage comments. ]
Signed-off-by: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20221206173607.1185907-5-yazen.ghannam@amd.com
---
 arch/x86/kernel/cpu/mce/amd.c      |  2 ++
 arch/x86/kernel/cpu/mce/core.c     |  8 +-------
 arch/x86/kernel/cpu/mce/internal.h | 31 ++++++++++++++++++++++++++++++-
 3 files changed, 33 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -306,6 +306,8 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
 		if ((low & BIT(5)) && !((high >> 5) & 0x3))
 			high |= BIT(5);
 
+		this_cpu_ptr(mce_banks_array)[bank].lsb_in_status = !!(low & BIT(8));
+
 		wrmsr(smca_config, low, high);
 	}
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -67,13 +67,7 @@ DEFINE_PER_CPU(unsigned, mce_exception_count);
 
 DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
 
-struct mce_bank {
-	u64			ctl;			/* subevents to enable */
-
-	__u64 init		: 1,			/* initialise bank? */
-	      __reserved_1	: 63;
-};
-static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
+DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
 
 #define ATTR_LEN		16
 /* One object for each MCE bank, shared by all CPUs */
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -177,6 +177,24 @@ struct mce_vendor_flags {
 
 extern struct mce_vendor_flags mce_flags;
 
+struct mce_bank {
+	/* subevents to enable */
+	u64			ctl;
+
+	/* initialise bank? */
+	__u64 init		: 1,
+
+	/*
+	 * (AMD) MCA_CONFIG[McaLsbInStatusSupported]: When set, this bit indicates
+	 * the LSB field is found in MCA_STATUS and not in MCA_ADDR.
+	 */
+	      lsb_in_status	: 1,
+
+	      __reserved_1	: 62;
+};
+
+DECLARE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
+
 enum mca_msr {
 	MCA_CTL,
 	MCA_STATUS,
@@ -190,7 +208,10 @@ extern bool filter_mce(struct mce *m);
 
 #ifdef CONFIG_X86_MCE_AMD
 extern bool amd_filter_mce(struct mce *m);
-/* Extract [55:<lsb>] where lsb is the LS-*valid* bit of the address bits. */
+/*
+ * If MCA_CONFIG[McaLsbInStatusSupported] is set, extract ErrAddr in bits
+ * [56:0] of MCA_STATUS, else in bits [55:0] of MCA_ADDR.
+ */
 static __always_inline void smca_extract_err_addr(struct mce *m)
 {
 	u8 lsb;
@@ -198,6 +219,14 @@ static __always_inline void smca_extract_err_addr(struct mce *m)
 	if (!mce_flags.smca)
 		return;
 
+	if (this_cpu_ptr(mce_banks_array)[m->bank].lsb_in_status) {
+		lsb = (m->status >> 24) & 0x3f;
+
+		m->addr &= GENMASK_ULL(56, lsb);
+
+		return;
+	}
+
 	lsb = (m->addr >> 56) & 0x3f;
 
 	m->addr &= GENMASK_ULL(55, lsb);