[RFC,v2,3/6] KVM: x86: SVM: Pass through shadow stack MSRs

Message ID 20230524155339.415820-4-john.allen@amd.com
State New
Series [RFC,v2,1/6] KVM: x86: SVM: Emulate reads and writes to shadow stack MSRs

Commit Message

John Allen May 24, 2023, 3:53 p.m. UTC
  If kvm supports shadow stack, pass through shadow stack MSRs to improve
guest performance.

Signed-off-by: John Allen <john.allen@amd.com>
---
 arch/x86/kvm/svm/svm.c | 17 +++++++++++++++++
 arch/x86/kvm/svm/svm.h |  2 +-
 2 files changed, 18 insertions(+), 1 deletion(-)
  

Comments

Sean Christopherson June 24, 2023, 12:05 a.m. UTC | #1
On Wed, May 24, 2023, John Allen wrote:
> If kvm supports shadow stack, pass through shadow stack MSRs to improve
> guest performance.
> 
> Signed-off-by: John Allen <john.allen@amd.com>
> ---
>  arch/x86/kvm/svm/svm.c | 17 +++++++++++++++++
>  arch/x86/kvm/svm/svm.h |  2 +-
>  2 files changed, 18 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 6df486bb1ac4..cdbce20989b8 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -136,6 +136,13 @@ static const struct svm_direct_access_msrs {
>  	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
>  	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
>  	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
> +	{ .index = MSR_IA32_U_CET,                      .always = false },
> +	{ .index = MSR_IA32_S_CET,                      .always = false },
> +	{ .index = MSR_IA32_INT_SSP_TAB,                .always = false },
> +	{ .index = MSR_IA32_PL0_SSP,                    .always = false },
> +	{ .index = MSR_IA32_PL1_SSP,                    .always = false },
> +	{ .index = MSR_IA32_PL2_SSP,                    .always = false },
> +	{ .index = MSR_IA32_PL3_SSP,                    .always = false },
>  	{ .index = MSR_INVALID,				.always = false },
>  };
>  
> @@ -1181,6 +1188,16 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
>  		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
>  		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
>  	}
> +
> +	if (kvm_cet_user_supported() && guest_cpuid_has(vcpu, X86_FEATURE_SHSTK)) {
> +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_U_CET, 1, 1);
> +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_S_CET, 1, 1);
> +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_INT_SSP_TAB, 1, 1);
> +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL0_SSP, 1, 1);
> +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL1_SSP, 1, 1);
> +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL2_SSP, 1, 1);
> +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL3_SSP, 1, 1);
> +	}

This is wrong, KVM needs to set/clear interception based on SHSTK, i.e. it can't
be a one-way street.  Userspace *probably* won't toggle SHSTK in guest CPUID, but
weirder things have happened.
  
John Allen Aug. 1, 2023, 3:25 p.m. UTC | #2
On Fri, Jun 23, 2023 at 05:05:18PM -0700, Sean Christopherson wrote:
> On Wed, May 24, 2023, John Allen wrote:
> > If kvm supports shadow stack, pass through shadow stack MSRs to improve
> > guest performance.
> > 
> > Signed-off-by: John Allen <john.allen@amd.com>
> > ---
> >  arch/x86/kvm/svm/svm.c | 17 +++++++++++++++++
> >  arch/x86/kvm/svm/svm.h |  2 +-
> >  2 files changed, 18 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index 6df486bb1ac4..cdbce20989b8 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -136,6 +136,13 @@ static const struct svm_direct_access_msrs {
> >  	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
> >  	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
> >  	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
> > +	{ .index = MSR_IA32_U_CET,                      .always = false },
> > +	{ .index = MSR_IA32_S_CET,                      .always = false },
> > +	{ .index = MSR_IA32_INT_SSP_TAB,                .always = false },
> > +	{ .index = MSR_IA32_PL0_SSP,                    .always = false },
> > +	{ .index = MSR_IA32_PL1_SSP,                    .always = false },
> > +	{ .index = MSR_IA32_PL2_SSP,                    .always = false },
> > +	{ .index = MSR_IA32_PL3_SSP,                    .always = false },
> >  	{ .index = MSR_INVALID,				.always = false },
> >  };
> >  
> > @@ -1181,6 +1188,16 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
> >  		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
> >  		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
> >  	}
> > +
> > +	if (kvm_cet_user_supported() && guest_cpuid_has(vcpu, X86_FEATURE_SHSTK)) {
> > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_U_CET, 1, 1);
> > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_S_CET, 1, 1);
> > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_INT_SSP_TAB, 1, 1);
> > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL0_SSP, 1, 1);
> > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL1_SSP, 1, 1);
> > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL2_SSP, 1, 1);
> > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL3_SSP, 1, 1);
> > +	}
> 
> This is wrong, KVM needs to set/clear interception based on SHSTK, i.e. it can't
> be a one-way street.  Userspace *probably* won't toggle SHSTK in guest CPUID, but
> weirder things have happened.

Can you clarify what you mean by that? Do you mean that we need to check
both guest_cpuid_has and kvm_cpu_cap_has like the guest_can_use function
that is used in Weijiang Yang's series? Or is there something else I'm
omitting here?

static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
                                          unsigned int feature)
{
        return kvm_cpu_cap_has(feature) && guest_cpuid_has(vcpu, feature);
}
  
Sean Christopherson Aug. 1, 2023, 4:42 p.m. UTC | #3
On Tue, Aug 01, 2023, John Allen wrote:
> On Fri, Jun 23, 2023 at 05:05:18PM -0700, Sean Christopherson wrote:
> > On Wed, May 24, 2023, John Allen wrote:
> > > If kvm supports shadow stack, pass through shadow stack MSRs to improve
> > > guest performance.
> > > 
> > > Signed-off-by: John Allen <john.allen@amd.com>
> > > ---
> > >  arch/x86/kvm/svm/svm.c | 17 +++++++++++++++++
> > >  arch/x86/kvm/svm/svm.h |  2 +-
> > >  2 files changed, 18 insertions(+), 1 deletion(-)
> > > 
> > > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > > index 6df486bb1ac4..cdbce20989b8 100644
> > > --- a/arch/x86/kvm/svm/svm.c
> > > +++ b/arch/x86/kvm/svm/svm.c
> > > @@ -136,6 +136,13 @@ static const struct svm_direct_access_msrs {
> > >  	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
> > >  	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
> > >  	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
> > > +	{ .index = MSR_IA32_U_CET,                      .always = false },
> > > +	{ .index = MSR_IA32_S_CET,                      .always = false },
> > > +	{ .index = MSR_IA32_INT_SSP_TAB,                .always = false },
> > > +	{ .index = MSR_IA32_PL0_SSP,                    .always = false },
> > > +	{ .index = MSR_IA32_PL1_SSP,                    .always = false },
> > > +	{ .index = MSR_IA32_PL2_SSP,                    .always = false },
> > > +	{ .index = MSR_IA32_PL3_SSP,                    .always = false },
> > >  	{ .index = MSR_INVALID,				.always = false },
> > >  };
> > >  
> > > @@ -1181,6 +1188,16 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
> > >  		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
> > >  		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
> > >  	}
> > > +
> > > +	if (kvm_cet_user_supported() && guest_cpuid_has(vcpu, X86_FEATURE_SHSTK)) {
> > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_U_CET, 1, 1);
> > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_S_CET, 1, 1);
> > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_INT_SSP_TAB, 1, 1);
> > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL0_SSP, 1, 1);
> > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL1_SSP, 1, 1);
> > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL2_SSP, 1, 1);
> > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL3_SSP, 1, 1);
> > > +	}
> > 
> > This is wrong, KVM needs to set/clear interception based on SHSTK, i.e. it can't
> > be a one-way street.  Userspace *probably* won't toggle SHSTK in guest CPUID, but
> > weirder things have happened.
> 
> Can you clarify what you mean by that? Do you mean that we need to check
> both guest_cpuid_has and kvm_cpu_cap_has like the guest_can_use function
> that is used in Weijiang Yang's series? Or is there something else I'm
> omitting here?

When init_vmcb_after_set_cpuid() is called, KVM must not assume that the MSRs are
currently intercepted, i.e. KVM can't just handle the case where userspace enables
SHSTK, KVM must also handle the case where userspace disables SHSTK.

Using guest_can_use() is also a good idea, but it would likely lead to extra work
on CPUs that don't support CET/SHSTK.  This isn't a fastpath, but toggling
interception for MSRs that don't exist would be odd.  It's probably better to
effectively open code guest_can_use(), with the KVM check gating the MSR toggling.

E.g. something like

	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
		bool shstk_enabled = guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);

		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_BLAH,
				    shstk_enabled, shstk_enabled);
	}
  
John Allen Aug. 1, 2023, 4:51 p.m. UTC | #4
On Tue, Aug 01, 2023 at 09:42:09AM -0700, Sean Christopherson wrote:
> On Tue, Aug 01, 2023, John Allen wrote:
> > On Fri, Jun 23, 2023 at 05:05:18PM -0700, Sean Christopherson wrote:
> > > On Wed, May 24, 2023, John Allen wrote:
> > > > If kvm supports shadow stack, pass through shadow stack MSRs to improve
> > > > guest performance.
> > > > 
> > > > Signed-off-by: John Allen <john.allen@amd.com>
> > > > ---
> > > >  arch/x86/kvm/svm/svm.c | 17 +++++++++++++++++
> > > >  arch/x86/kvm/svm/svm.h |  2 +-
> > > >  2 files changed, 18 insertions(+), 1 deletion(-)
> > > > 
> > > > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > > > index 6df486bb1ac4..cdbce20989b8 100644
> > > > --- a/arch/x86/kvm/svm/svm.c
> > > > +++ b/arch/x86/kvm/svm/svm.c
> > > > @@ -136,6 +136,13 @@ static const struct svm_direct_access_msrs {
> > > >  	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
> > > >  	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
> > > >  	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
> > > > +	{ .index = MSR_IA32_U_CET,                      .always = false },
> > > > +	{ .index = MSR_IA32_S_CET,                      .always = false },
> > > > +	{ .index = MSR_IA32_INT_SSP_TAB,                .always = false },
> > > > +	{ .index = MSR_IA32_PL0_SSP,                    .always = false },
> > > > +	{ .index = MSR_IA32_PL1_SSP,                    .always = false },
> > > > +	{ .index = MSR_IA32_PL2_SSP,                    .always = false },
> > > > +	{ .index = MSR_IA32_PL3_SSP,                    .always = false },
> > > >  	{ .index = MSR_INVALID,				.always = false },
> > > >  };
> > > >  
> > > > @@ -1181,6 +1188,16 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
> > > >  		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
> > > >  		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
> > > >  	}
> > > > +
> > > > +	if (kvm_cet_user_supported() && guest_cpuid_has(vcpu, X86_FEATURE_SHSTK)) {
> > > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_U_CET, 1, 1);
> > > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_S_CET, 1, 1);
> > > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_INT_SSP_TAB, 1, 1);
> > > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL0_SSP, 1, 1);
> > > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL1_SSP, 1, 1);
> > > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL2_SSP, 1, 1);
> > > > +		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL3_SSP, 1, 1);
> > > > +	}
> > > 
> > > This is wrong, KVM needs to set/clear interception based on SHSTK, i.e. it can't
> > > be a one-way street.  Userspace *probably* won't toggle SHSTK in guest CPUID, but
> > > weirder things have happened.
> > 
> > Can you clarify what you mean by that? Do you mean that we need to check
> > both guest_cpuid_has and kvm_cpu_cap_has like the guest_can_use function
> > that is used in Weijiang Yang's series? Or is there something else I'm
> > omitting here?
> 
> When init_vmcb_after_set_cpuid() is called, KVM must not assume that the MSRs are
> currently intercepted, i.e. KVM can't just handle the case where userspace enables
> SHSTK, KVM must also handle the case where userspace disables SHSTK.
> 
> Using guest_can_use() is also a good idea, but it would likely lead to extra work
> on CPUs that don't support CET/SHSTK.  This isn't a fastpath, but toggling
> interception for MSRs that don't exist would be odd.  It's probably better to
> effectively open code guest_can_use(), with the KVM check gating the MSR toggling.
> 
> E.g. something like
> 
> 	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
> 		bool shstk_enabled = guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);
> 
> 		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_BLAH,
> 				    shstk_enabled, shstk_enabled);
> 	}

Thanks for the clarification. I will use the above method in the next
version of the series.
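
For reference, a minimal sketch (not part of the posted patch) of how the reworked
hunk in init_vmcb_after_set_cpuid() might look with Sean's suggested pattern applied
to all seven shadow stack MSRs; the next version of the series may differ:

	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
		/*
		 * Toggle interception both ways: pass the MSRs through only
		 * while the guest's CPUID advertises SHSTK, and re-enable
		 * interception if userspace later clears the feature bit.
		 */
		bool shstk_enabled = guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);

		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_U_CET,
				     shstk_enabled, shstk_enabled);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_S_CET,
				     shstk_enabled, shstk_enabled);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_INT_SSP_TAB,
				     shstk_enabled, shstk_enabled);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL0_SSP,
				     shstk_enabled, shstk_enabled);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL1_SSP,
				     shstk_enabled, shstk_enabled);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL2_SSP,
				     shstk_enabled, shstk_enabled);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL3_SSP,
				     shstk_enabled, shstk_enabled);
	}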
  

Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 6df486bb1ac4..cdbce20989b8 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -136,6 +136,13 @@  static const struct svm_direct_access_msrs {
 	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
 	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
 	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
+	{ .index = MSR_IA32_U_CET,                      .always = false },
+	{ .index = MSR_IA32_S_CET,                      .always = false },
+	{ .index = MSR_IA32_INT_SSP_TAB,                .always = false },
+	{ .index = MSR_IA32_PL0_SSP,                    .always = false },
+	{ .index = MSR_IA32_PL1_SSP,                    .always = false },
+	{ .index = MSR_IA32_PL2_SSP,                    .always = false },
+	{ .index = MSR_IA32_PL3_SSP,                    .always = false },
 	{ .index = MSR_INVALID,				.always = false },
 };
 
@@ -1181,6 +1188,16 @@  static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
 		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
 		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
 	}
+
+	if (kvm_cet_user_supported() && guest_cpuid_has(vcpu, X86_FEATURE_SHSTK)) {
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_U_CET, 1, 1);
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_S_CET, 1, 1);
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_INT_SSP_TAB, 1, 1);
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL0_SSP, 1, 1);
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL1_SSP, 1, 1);
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL2_SSP, 1, 1);
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PL3_SSP, 1, 1);
+	}
 }
 
 static void init_vmcb(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f44751dd8d5d..dad977747a15 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -29,7 +29,7 @@ 
 #define	IOPM_SIZE PAGE_SIZE * 3
 #define	MSRPM_SIZE PAGE_SIZE * 2
 
-#define MAX_DIRECT_ACCESS_MSRS	46
+#define MAX_DIRECT_ACCESS_MSRS	53
 #define MSRPM_OFFSETS	32
 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 extern bool npt_enabled;