[05/12] KVM: x86: Use KVM-governed feature framework to track "XSAVES enabled"

Message ID 20230217231022.816138-6-seanjc@google.com
State New
Series KVM: x86: Add "governed" X86_FEATURE framework

Commit Message

Sean Christopherson Feb. 17, 2023, 11:10 p.m. UTC
  Use the governed feature framework to track if XSAVES is "enabled", i.e.
if XSAVES can be used by the guest.  Add a comment in the SVM code to
explain the very unintuitive logic of deliberately NOT checking if XSAVES
is enumerated in the guest CPUID model.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/governed_features.h |  1 +
 arch/x86/kvm/svm/svm.c           | 17 ++++++++++++---
 arch/x86/kvm/vmx/vmx.c           | 36 ++++++++++++++++----------------
 arch/x86/kvm/x86.c               |  4 ++--
 4 files changed, 35 insertions(+), 23 deletions(-)
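
For readers jumping into the series at this patch: the diff below leans on the governed-feature helpers added by the earlier patches. The sketch that follows is illustrative only. The names kvm_governed_feature_set(), kvm_governed_feature_check_and_set(), guest_can_use() and kvm_is_governed_feature() are taken straight from the diff, but kvm_governed_feature_index() and the per-vCPU governed_features bitmap are assumptions about the series' plumbing, not its actual code.

/*
 * Illustrative sketch only: the helper names match what this patch calls,
 * the plumbing (index mapping, bitmap field) is assumed.
 */

/* Map an X86_FEATURE_* to a dense bit index; -1 if the feature isn't governed. */
static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
	switch (x86_feature) {
	case X86_FEATURE_GBPAGES:
		return 0;
	case X86_FEATURE_XSAVES:	/* added by this patch */
		return 1;
	default:
		return -1;
	}
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
	return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	/* One bit per governed feature, recomputed when guest CPUID changes. */
	__set_bit(kvm_governed_feature_index(x86_feature),
		  vcpu->arch.governed_features.enabled);
}

static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
							       unsigned int x86_feature)
{
	/*
	 * Only mark the feature usable if the guest can actually see it; the
	 * real helper may apply additional KVM/host support checks.
	 */
	if (guest_cpuid_has(vcpu, x86_feature))
		kvm_governed_feature_set(vcpu, x86_feature);
}

static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
					  unsigned int x86_feature)
{
	/* Replaces ad hoc bools such as vcpu->arch.xsaves_enabled. */
	return test_bit(kvm_governed_feature_index(x86_feature),
			vcpu->arch.governed_features.enabled);
}

Note the two setters: the SVM hunk below uses kvm_governed_feature_set() directly because XSAVES must be treated as usable even when it isn't advertised to the guest, while the VMX hunk uses kvm_governed_feature_check_and_set(), which additionally requires the feature to be enumerated in guest CPUID, matching the guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) check it replaces.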
  

Comments

Yu Zhang Feb. 21, 2023, 2:56 p.m. UTC | #1
On Fri, Feb 17, 2023 at 03:10:15PM -0800, Sean Christopherson wrote:
> Use the governed feature framework to track if XSAVES is "enabled", i.e.
> if XSAVES can be used by the guest.  Add a comment in the SVM code to
> explain the very unintuitive logic of deliberately NOT checking if XSAVES
> is enumerated in the guest CPUID model.
> 
> No functional change intended.

xsaves_enabled in struct kvm_vcpu_arch is no longer used. But instead of
just deleting it, maybe we could move 'bool load_eoi_exitmap_pending' to
its place, so 7 bytes can be saved for each struct kvm_vcpu_arch:

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cd660de02f7b..0eef5469c165 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -740,7 +740,6 @@ struct kvm_vcpu_arch {
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
-       bool load_eoi_exitmap_pending;
        DECLARE_BITMAP(ioapic_handled_vectors, 256);
        unsigned long apic_attention;
        int32_t apic_arb_prio;
@@ -750,7 +749,7 @@ struct kvm_vcpu_arch {
        u64 smi_count;
        bool at_instruction_boundary;
        bool tpr_access_reporting;
-       bool xsaves_enabled;
+       bool load_eoi_exitmap_pending;
        bool xfd_no_write_intercept;
        u64 ia32_xss;
        u64 microcode_version;

B.R.
Yu
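
The savings Yu describes come from alignment: DECLARE_BITMAP() expands to an array of unsigned long, so a lone bool sitting directly in front of it is followed by 7 bytes of padding on x86-64. The standalone userspace toy below uses a simplified, hypothetical subset of kvm_vcpu_arch's fields purely to demonstrate that mechanism; the exact number of bytes saved in the real struct depends on the neighboring fields, which this sketch omits.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the relevant kvm_vcpu_arch fields, before the reorder. */
struct vcpu_arch_before {
	void *apic;
	bool load_eoi_exitmap_pending;		 /* 1 byte + 7 bytes of padding... */
	unsigned long ioapic_handled_vectors[4]; /* ...because this is 8-byte aligned */
	bool at_instruction_boundary;
	bool tpr_access_reporting;
	bool xsaves_enabled;			 /* made unused by this patch */
	bool xfd_no_write_intercept;
	uint64_t ia32_xss;
};

/* After the reorder: the moved bool fills the byte xsaves_enabled vacated. */
struct vcpu_arch_after {
	void *apic;
	unsigned long ioapic_handled_vectors[4];
	bool at_instruction_boundary;
	bool tpr_access_reporting;
	bool load_eoi_exitmap_pending;
	bool xfd_no_write_intercept;
	uint64_t ia32_xss;
};

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct vcpu_arch_before), sizeof(struct vcpu_arch_after));
	return 0;
}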
  
Sean Christopherson Feb. 22, 2023, 6:56 p.m. UTC | #2
On Tue, Feb 21, 2023, Yu Zhang wrote:
> On Fri, Feb 17, 2023 at 03:10:15PM -0800, Sean Christopherson wrote:
> > Use the governed feature framework to track if XSAVES is "enabled", i.e.
> > if XSAVES can be used by the guest.  Add a comment in the SVM code to
> > explain the very unintuitive logic of deliberately NOT checking if XSAVES
> > is enumerated in the guest CPUID model.
> > 
> > No functional change intended.
> 
> xsaves_enabled in struct kvm_vcpu_arch is no longer used. But instead of
> just deleting it, maybe we could move 'bool load_eoi_exitmap_pending' to
> its place, so 7 bytes can be saved for each struct kvm_vcpu_arch:

I prefer leaving load_eoi_exitmap_pending where it is so that it's co-located with
ioapic_handled_vectors.  I agree wasting 7 bytes is unfortunate, but I don't want
to take an ad hoc approach to shrinking per-vCPU structs.  See the link below for
more discussion.

https://lore.kernel.org/all/20230213163351.30704-1-minipli@grsecurity.net

> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index cd660de02f7b..0eef5469c165 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -740,7 +740,6 @@ struct kvm_vcpu_arch {
>         u64 efer;
>         u64 apic_base;
>         struct kvm_lapic *apic;    /* kernel irqchip context */
> -       bool load_eoi_exitmap_pending;
>         DECLARE_BITMAP(ioapic_handled_vectors, 256);
>         unsigned long apic_attention;
>         int32_t apic_arb_prio;
> @@ -750,7 +749,7 @@ struct kvm_vcpu_arch {
>         u64 smi_count;
>         bool at_instruction_boundary;
>         bool tpr_access_reporting;
> -       bool xsaves_enabled;
> +       bool load_eoi_exitmap_pending;
>         bool xfd_no_write_intercept;
>         u64 ia32_xss;
>         u64 microcode_version;
> 
> B.R.
> Yu
>
  
Yu Zhang Feb. 24, 2023, 9:54 a.m. UTC | #3
On Wed, Feb 22, 2023 at 10:56:04AM -0800, Sean Christopherson wrote:
> On Tue, Feb 21, 2023, Yu Zhang wrote:
> > On Fri, Feb 17, 2023 at 03:10:15PM -0800, Sean Christopherson wrote:
> > > Use the governed feature framework to track if XSAVES is "enabled", i.e.
> > > if XSAVES can be used by the guest.  Add a comment in the SVM code to
> > > explain the very unintuitive logic of deliberately NOT checking if XSAVES
> > > is enumerated in the guest CPUID model.
> > > 
> > > No functional change intended.
> > 
> > xsaves_enabled in struct kvm_vcpu_arch is no longer used. But instead of
> > just deleting it, maybe we could move 'bool load_eoi_exitmap_pending' to
> > its place, so 7 bytes can be saved for each struct kvm_vcpu_arch:
> 
> I prefer leaving load_eoi_exitmap_pending where it is so that it's co-located with
> ioapic_handled_vectors.  I agree wasting 7 bytes is unfortunate, but I don't want
> to take an ad hoc approach to shrinking per-vCPU structs.  See the link below for
> more discussion.
> 
> https://lore.kernel.org/all/20230213163351.30704-1-minipli@grsecurity.net

Fair enough. :)

Thanks
Yu
  

Patch

diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h
index b29c15d5e038..b896a64e4ac3 100644
--- a/arch/x86/kvm/governed_features.h
+++ b/arch/x86/kvm/governed_features.h
@@ -6,6 +6,7 @@  BUILD_BUG()
 #define KVM_GOVERNED_X86_FEATURE(x) KVM_GOVERNED_FEATURE(X86_FEATURE_##x)
 
 KVM_GOVERNED_X86_FEATURE(GBPAGES)
+KVM_GOVERNED_X86_FEATURE(XSAVES)
 
 #undef KVM_GOVERNED_X86_FEATURE
 #undef KVM_GOVERNED_FEATURE
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b43775490074..d89e516449ad 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4128,9 +4128,20 @@  static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_cpuid_entry2 *best;
 
-	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-				    boot_cpu_has(X86_FEATURE_XSAVE) &&
-				    boot_cpu_has(X86_FEATURE_XSAVES);
+	/*
+	 * SVM doesn't provide a way to disable just XSAVES in the guest; KVM
+	 * can only disable all variants of XSAVE by disallowing CR4.OSXSAVE
+	 * from being set.  As a result, if the host has XSAVE and XSAVES, and
+	 * the guest has XSAVE enabled, the guest can execute XSAVES without
+	 * faulting.  Treat XSAVES as enabled in this case regardless of
+	 * whether it's advertised to the guest so that KVM context switches
+	 * XSS on VM-Enter/VM-Exit.  Failure to do so would effectively give
+	 * the guest read/write access to the host's XSS.
+	 */
+	if (boot_cpu_has(X86_FEATURE_XSAVE) &&
+	    boot_cpu_has(X86_FEATURE_XSAVES) &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
+		kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES);
 
 	/* Update nrips enabled cache */
 	svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 14ce195eee5a..c64a12756016 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4551,16 +4551,19 @@  vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
  * based on a single guest CPUID bit, with a dedicated feature bit.  This also
  * verifies that the control is actually supported by KVM and hardware.
  */
-#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
-({									 \
-	bool __enabled;							 \
-									 \
-	if (cpu_has_vmx_##name()) {					 \
-		__enabled = guest_cpuid_has(&(vmx)->vcpu,		 \
-					    X86_FEATURE_##feat_name);	 \
-		vmx_adjust_secondary_exec_control(vmx, exec_control,	 \
-			SECONDARY_EXEC_##ctrl_name, __enabled, exiting); \
-	}								 \
+#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting)	\
+({												\
+	struct kvm_vcpu *__vcpu = &(vmx)->vcpu;							\
+	bool __enabled;										\
+												\
+	if (cpu_has_vmx_##name()) {								\
+		if (kvm_is_governed_feature(X86_FEATURE_##feat_name))				\
+			__enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name);		\
+		else										\
+			__enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name);		\
+		vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
+						  __enabled, exiting);				\
+	}											\
 })
 
 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
@@ -4620,10 +4623,7 @@  static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 	if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
 		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
 
-	if (cpu_has_vmx_xsaves())
-		vmx_adjust_secondary_exec_control(vmx, &exec_control,
-						  SECONDARY_EXEC_ENABLE_XSAVES,
-						  vcpu->arch.xsaves_enabled, false);
+	vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES);
 
 	/*
 	 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
@@ -4642,6 +4642,7 @@  static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 						  SECONDARY_EXEC_ENABLE_RDTSCP,
 						  rdpid_or_rdtscp_enabled, false);
 	}
+
 	vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
 
 	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
@@ -7705,10 +7706,9 @@  static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	 * to the guest.  XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
 	 * set if and only if XSAVE is supported.
 	 */
-	vcpu->arch.xsaves_enabled = cpu_has_vmx_xsaves() &&
-				    boot_cpu_has(X86_FEATURE_XSAVE) &&
-				    guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-				    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
+	if (cpu_has_vmx_xsaves() && boot_cpu_has(X86_FEATURE_XSAVE) &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
+		kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
 
 	vmx_setup_uret_msrs(vmx);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f706621c35b8..541982de5762 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -988,7 +988,7 @@  void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.xcr0 != host_xcr0)
 			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
 
-		if (vcpu->arch.xsaves_enabled &&
+		if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
 		    vcpu->arch.ia32_xss != host_xss)
 			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
 	}
@@ -1023,7 +1023,7 @@  void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.xcr0 != host_xcr0)
 			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
 
-		if (vcpu->arch.xsaves_enabled &&
+		if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
 		    vcpu->arch.ia32_xss != host_xss)
 			wrmsrl(MSR_IA32_XSS, host_xss);
 	}