[1/3] x86/cpufeatures: Add support for cpuid leaf 80000021/EAX (FeatureExt2Eax)
Commit Message
AMD Zen4 processors advertise features in this leaf.
Add the leaf and its Automatic IBRS feature bit.
Note: New whole leaf (vs a bit) due to propagation via KVM
later in this series.
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
---
arch/x86/include/asm/cpufeature.h | 7 +++++--
arch/x86/include/asm/cpufeatures.h | 5 ++++-
arch/x86/include/asm/disabled-features.h | 3 ++-
arch/x86/include/asm/required-features.h | 3 ++-
arch/x86/kernel/cpu/common.c | 3 +++
5 files changed, 16 insertions(+), 5 deletions(-)
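The KVM propagation the note refers to is not part of this patch. Purely as an
illustrative sketch (kvm_cpu_cap_mask() and F() are KVM's existing helpers; the
real KVM change comes later in the series and may look different):

    kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
        F(AUTOIBRS) /* plus whatever other 0x80000021 EAX bits get defined */
    );

With a whole hardware word, KVM can mask and advertise the leaf like any other
architectural leaf, instead of needing a KVM-only scattered entry.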
Comments
On Fri, Nov 04, 2022 at 04:36:49PM -0500, Kim Phillips wrote:
> AMD Zen4 processors advertise features in this leaf.
> Add the leaf and its Automatic IBRS feature bit.
>
> Note: New whole leaf (vs a bit) due to propagation via KVM
> later in this series.
No, not a separate leaf - use scattered.c
For an example of what to do for KVM, see
https://lore.kernel.org/r/20221103025030.78371-1-jiaxi.chen@linux.intel.com
Thx.
On 11/4/22 4:48 PM, Borislav Petkov wrote:
> On Fri, Nov 04, 2022 at 04:36:49PM -0500, Kim Phillips wrote:
>> AMD Zen4 processors advertise features in this leaf.
>> Add the leaf and its Automatic IBRS feature bit.
>>
>> Note: New whole leaf (vs a bit) due to propagation via KVM
>> later in this series.
>
> No, not a separate leaf - use scattered.c
>
> For an example of what to do for KVM, see
>
> https://lore.kernel.org/r/20221103025030.78371-1-jiaxi.chen@linux.intel.com
That adds features that are mutually exclusive between
kvm and the host kernel, unlike AUTOIBRS.
When trying to wire up a scattered host AUTOIBRS version to
kvm, I couldn't get past all the reverse_cpuid_check()
BUILD_BUGs demanding exclusivity between h/w and "Linux"
(s/w) FEATUREs.
Is there an example of a scattered feature that gets both its
boot_cpu_has() and guest_cpuid_has() satisfied in the same build?
If not, I'll resubmit like this original submission - AUTOIBRS in
a separate h/w leaf.
Thanks,
Kim
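For reference, the reverse_cpuid_check() BUILD_BUGs referred to above look
roughly like this (a paraphrase of arch/x86/kvm/reverse_cpuid.h, not verbatim):

    static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
    {
        /* Linux-defined (synthetic/scattered) words are rejected outright. */
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
        /* ... and the word must have a reverse_cpuid[] entry describing its leaf */
        BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
        BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
    }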
On Tue, Nov 15, 2022 at 05:10:50PM -0600, Kim Phillips wrote:
> When trying to wire up a scattered host AUTOIBRS version to
> kvm, I couldn't get past all the reverse_cpuid_check()
> BUILD_BUGs demanding exclusivity between h/w and "Linux"
> (s/w) FEATUREs.
I guess something like below.
Sean, can you pls check the KVM bits whether I've done them all right?
In any case, it seems to work, guest has:
processor : 0
vendor_id : AuthenticAMD
cpu family : 25
model : 1
model name : AMD EPYC-Milan Processor
stepping : 1
flags : ... autoibrs ...
---
From: Borislav Petkov <bp@suse.de>
Date: Wed, 16 Nov 2022 12:50:08 +0100
Subject: [PATCH] x86/cpu, kvm: Add X86_FEATURE_AUTOIBRS
Add AMD AutoIBRS feature bit support. Use a synthetic (scattered) bit,
as this is the first bit defined from the 0x80000021 leaf.
Add the corresponding word to KVM's feature machinery so that the bit
gets advertised to the guest too.
Signed-off-by: Borislav Petkov <bp@suse.de>
---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/kernel/cpu/scattered.c | 1 +
arch/x86/kvm/cpuid.c | 2 ++
arch/x86/kvm/reverse_cpuid.h | 18 ++++++++++++------
4 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 2bc1557dc203..2cf102911241 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -306,6 +306,7 @@
#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
#define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* "" SGX EDECCSSA user leaf function */
#define X86_FEATURE_CALL_DEPTH (11*32+19) /* "" Call depth tracking for RSB stuffing */
+#define X86_FEATURE_AUTOIBRS (11*32+20) /* AMD Automatic IBRS */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index f53944fb8f7f..e20117658c5b 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -45,6 +45,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+ { X86_FEATURE_AUTOIBRS, CPUID_EAX, 8, 0x80000021, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
{ 0, 0, 0, 0, 0 }
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index c92c49a0b35b..050bca360731 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -750,6 +750,8 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
kvm_cpu_cap_clear(X86_FEATURE_RDPID);
}
+
+ kvm_cpu_cap_init_scattered(CPUID_8000_0021_EAX, SF(AUTOIBRS));
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index 4e5b8444f161..c4801ac84a4a 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -13,6 +13,7 @@
*/
enum kvm_only_cpuid_leafs {
CPUID_12_EAX = NCAPINTS,
+ CPUID_8000_0021_EAX,
NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -25,6 +26,9 @@ enum kvm_only_cpuid_leafs {
#define KVM_X86_FEATURE_SGX2 KVM_X86_FEATURE(CPUID_12_EAX, 1)
#define KVM_X86_FEATURE_SGX_EDECCSSA KVM_X86_FEATURE(CPUID_12_EAX, 11)
+/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX) */
+#define KVM_X86_FEATURE_AUTOIBRS KVM_X86_FEATURE(CPUID_8000_0021_EAX, 8)
+
struct cpuid_reg {
u32 function;
u32 index;
@@ -49,6 +53,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_7_1_EAX] = { 7, 1, CPUID_EAX},
[CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX},
[CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
+ [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
};
/*
@@ -75,12 +80,13 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
*/
static __always_inline u32 __feature_translate(int x86_feature)
{
- if (x86_feature == X86_FEATURE_SGX1)
- return KVM_X86_FEATURE_SGX1;
- else if (x86_feature == X86_FEATURE_SGX2)
- return KVM_X86_FEATURE_SGX2;
- else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
- return KVM_X86_FEATURE_SGX_EDECCSSA;
+ switch (x86_feature) {
+ case X86_FEATURE_SGX1: return KVM_X86_FEATURE_SGX1;
+ case X86_FEATURE_SGX2: return KVM_X86_FEATURE_SGX2;
+ case X86_FEATURE_SGX_EDECCSSA: return KVM_X86_FEATURE_SGX_EDECCSSA;
+ case X86_FEATURE_AUTOIBRS: return KVM_X86_FEATURE_AUTOIBRS;
+ default: break;
+ }
return x86_feature;
}
On Wed, Nov 16, 2022, Borislav Petkov wrote:
> On Tue, Nov 15, 2022 at 05:10:50PM -0600, Kim Phillips wrote:
> > When trying to wire up a scattered host AUTOIBRS version to
> > kvm, I couldn't get past all the reverse_cpuid_check()
> > BUILD_BUGs demanding exclusivity between h/w and "Linux"
> > (s/w) FEATUREs.
FWIW, it's not exclusivity per se, it's to ensure that any CPUID bit KVM wants
to advertise to userspace uses the architectural definition and not the kernel's
software-defined info. This allows KVM to do things like
if (guest_cpuid_has(vcpu, X86_FEATURE_AUTOIBRS))
and guarantee that the lookup on guest CPUID, which follows the architectural
layout, will look at the correct leaf+subleaf+reg+bit.
> I guess something like below.
>
> Sean, can you pls check the KVM bits whether I've done them all right?
Looks correct.
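Concretely, the lookup Sean describes goes roughly like this (a sketch built
from the __feature_translate() and reverse_cpuid[] pieces in the patch above,
not verbatim kernel code):

    static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
    {
        unsigned int x86_leaf = __feature_translate(x86_feature) / 32;

        reverse_cpuid_check(x86_leaf);  /* only architectural words allowed */
        return reverse_cpuid[x86_leaf]; /* -> function/index/reg to query */
    }

guest_cpuid_has() then looks up that function/index in the guest's CPUID
entries and tests the translated bit in the returned register, so the
KVM_X86_FEATURE_AUTOIBRS definition above is what keeps the lookup pointed at
leaf 0x80000021 EAX bit 8.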
On Wed, Nov 16, 2022 at 08:22:23PM +0000, Sean Christopherson wrote:
> > Sean, can you pls check the KVM bits whether I've done them all right?
>
> Looks correct.
Thanks!
@@ -32,6 +32,7 @@ enum cpuid_leafs
CPUID_8000_0007_EBX,
CPUID_7_EDX,
CPUID_8000_001F_EAX,
+ CPUID_8000_0021_EAX,
};
#define X86_CAP_FMT_NUM "%d:%d"
@@ -94,8 +95,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 20))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 21))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -118,8 +120,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 20))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 21))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 20 /* N 32-bit words worth of info */
+#define NCAPINTS 21 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
@@ -421,6 +421,9 @@
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
+#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* AMD Automatic IBRS */
+
/*
* BUG word(s)
*/
@@ -118,6 +118,7 @@
#define DISABLED_MASK17 0
#define DISABLED_MASK18 0
#define DISABLED_MASK19 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+#define DISABLED_MASK20 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
@@ -98,6 +98,7 @@
#define REQUIRED_MASK17 0
#define REQUIRED_MASK18 0
#define REQUIRED_MASK19 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20)
+#define REQUIRED_MASK20 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
@@ -1091,6 +1091,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
if (c->extended_cpuid_level >= 0x8000001f)
c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
+ if (c->extended_cpuid_level >= 0x80000021)
+ c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
+
init_scattered_cpuid_features(c);
init_speculation_control(c);
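Once the word is populated in x86_capability[], host code later in the series
can gate on the feature in the usual way. A hedged sketch of a consumer (the
EFER bit name and number are an assumption taken from the AMD APM, not
something this patch defines):

    /* Sketch only; _EFER_AUTOIBRS (bit 21, AIBRSE) is assumed from the APM. */
    #define _EFER_AUTOIBRS  21

    static void enable_autoibrs(void)
    {
        if (cpu_feature_enabled(X86_FEATURE_AUTOIBRS))
            msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
    }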