[v3,16/18] x86/virt: KVM: Move "disable SVM" helper into KVM SVM
Commit Message
Move cpu_svm_disable() into KVM proper now that all hardware
virtualization management is routed through KVM. Remove the now-empty
virtext.h.
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/include/asm/virtext.h | 50 ----------------------------------
arch/x86/kvm/svm/svm.c | 28 +++++++++++++++++--
2 files changed, 25 insertions(+), 53 deletions(-)
delete mode 100644 arch/x86/include/asm/virtext.h
Comments
On Fri, 2023-05-12 at 16:50 -0700, Sean Christopherson wrote:
> Move cpu_svm_disable() into KVM proper now that all hardware
> virtualization management is routed through KVM. Remove the now-empty
> virtext.h.
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
> ---
> arch/x86/include/asm/virtext.h | 50 ----------------------------------
> arch/x86/kvm/svm/svm.c | 28 +++++++++++++++++--
> 2 files changed, 25 insertions(+), 53 deletions(-)
> delete mode 100644 arch/x86/include/asm/virtext.h
>
> diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
> deleted file mode 100644
> index 632575e257d8..000000000000
> --- a/arch/x86/include/asm/virtext.h
> +++ /dev/null
> @@ -1,50 +0,0 @@
> -/* SPDX-License-Identifier: GPL-2.0-only */
> -/* CPU virtualization extensions handling
> - *
> - * This should carry the code for handling CPU virtualization extensions
> - * that needs to live in the kernel core.
> - *
> - * Author: Eduardo Habkost <ehabkost@redhat.com>
> - *
> - * Copyright (C) 2008, Red Hat Inc.
> - *
> - * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
> - */
> -#ifndef _ASM_X86_VIRTEX_H
> -#define _ASM_X86_VIRTEX_H
> -
> -#include <asm/processor.h>
> -
> -#include <asm/vmx.h>
> -#include <asm/svm.h>
> -#include <asm/tlbflush.h>
> -
> -/*
> - * SVM functions:
> - */
> -/** Disable SVM on the current CPU
> - */
> -static inline void cpu_svm_disable(void)
> -{
> - uint64_t efer;
> -
> - wrmsrl(MSR_VM_HSAVE_PA, 0);
> - rdmsrl(MSR_EFER, efer);
> - if (efer & EFER_SVME) {
> - /*
> - * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
> - * aren't blocked, e.g. if a fatal error occurred between CLGI
> - * and STGI. Note, STGI may #UD if SVM is disabled from NMI
> - * context between reading EFER and executing STGI. In that
> - * case, GIF must already be set, otherwise the NMI would have
> - * been blocked, so just eat the fault.
> - */
> - asm_volatile_goto("1: stgi\n\t"
> - _ASM_EXTABLE(1b, %l[fault])
> - ::: "memory" : fault);
> -fault:
> - wrmsrl(MSR_EFER, efer & ~EFER_SVME);
> - }
> -}
> -
> -#endif /* _ASM_X86_VIRTEX_H */
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index cf5f3880751b..2cc195d95d32 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -41,7 +41,6 @@
> #include <asm/reboot.h>
> #include <asm/fpu/api.h>
>
> -#include <asm/virtext.h>
> #include "trace.h"
>
> #include "svm.h"
> @@ -587,9 +586,32 @@ void __svm_write_tsc_multiplier(u64 multiplier)
> preempt_enable();
> }
>
> +static inline void kvm_cpu_svm_disable(void)
> +{
> + uint64_t efer;
> +
> + wrmsrl(MSR_VM_HSAVE_PA, 0);
> + rdmsrl(MSR_EFER, efer);
> + if (efer & EFER_SVME) {
> + /*
> + * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
> + * aren't blocked, e.g. if a fatal error occurred between CLGI
> + * and STGI. Note, STGI may #UD if SVM is disabled from NMI
> + * context between reading EFER and executing STGI. In that
> + * case, GIF must already be set, otherwise the NMI would have
> + * been blocked, so just eat the fault.
> + */
> + asm_volatile_goto("1: stgi\n\t"
> + _ASM_EXTABLE(1b, %l[fault])
> + ::: "memory" : fault);
> +fault:
> + wrmsrl(MSR_EFER, efer & ~EFER_SVME);
> + }
> +}
> +
> static void svm_emergency_disable(void)
> {
> - cpu_svm_disable();
> + kvm_cpu_svm_disable();
> }
>
> static void svm_hardware_disable(void)
> @@ -598,7 +620,7 @@ static void svm_hardware_disable(void)
> if (tsc_scaling)
> __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
>
> - cpu_svm_disable();
> + kvm_cpu_svm_disable();
>
> amd_pmu_disable_virt();
> }
> --
> 2.40.1.606.ga4b1b128d6-goog
>
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
deleted file mode 100644
index 632575e257d8..000000000000
--- a/arch/x86/include/asm/virtext.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* CPU virtualization extensions handling
- *
- * This should carry the code for handling CPU virtualization extensions
- * that needs to live in the kernel core.
- *
- * Author: Eduardo Habkost <ehabkost@redhat.com>
- *
- * Copyright (C) 2008, Red Hat Inc.
- *
- * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
- */
-#ifndef _ASM_X86_VIRTEX_H
-#define _ASM_X86_VIRTEX_H
-
-#include <asm/processor.h>
-
-#include <asm/vmx.h>
-#include <asm/svm.h>
-#include <asm/tlbflush.h>
-
-/*
- * SVM functions:
- */
-/** Disable SVM on the current CPU
- */
-static inline void cpu_svm_disable(void)
-{
- uint64_t efer;
-
- wrmsrl(MSR_VM_HSAVE_PA, 0);
- rdmsrl(MSR_EFER, efer);
- if (efer & EFER_SVME) {
- /*
- * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
- * aren't blocked, e.g. if a fatal error occurred between CLGI
- * and STGI. Note, STGI may #UD if SVM is disabled from NMI
- * context between reading EFER and executing STGI. In that
- * case, GIF must already be set, otherwise the NMI would have
- * been blocked, so just eat the fault.
- */
- asm_volatile_goto("1: stgi\n\t"
- _ASM_EXTABLE(1b, %l[fault])
- ::: "memory" : fault);
-fault:
- wrmsrl(MSR_EFER, efer & ~EFER_SVME);
- }
-}
-
-#endif /* _ASM_X86_VIRTEX_H */
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index cf5f3880751b..2cc195d95d32 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -41,7 +41,6 @@
 #include <asm/reboot.h>
 #include <asm/fpu/api.h>
 
-#include <asm/virtext.h>
 #include "trace.h"
 
 #include "svm.h"
@@ -587,9 +586,32 @@ void __svm_write_tsc_multiplier(u64 multiplier)
 	preempt_enable();
 }
 
+static inline void kvm_cpu_svm_disable(void)
+{
+	uint64_t efer;
+
+	wrmsrl(MSR_VM_HSAVE_PA, 0);
+	rdmsrl(MSR_EFER, efer);
+	if (efer & EFER_SVME) {
+		/*
+		 * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
+		 * aren't blocked, e.g. if a fatal error occurred between CLGI
+		 * and STGI.  Note, STGI may #UD if SVM is disabled from NMI
+		 * context between reading EFER and executing STGI.  In that
+		 * case, GIF must already be set, otherwise the NMI would have
+		 * been blocked, so just eat the fault.
+		 */
+		asm_volatile_goto("1: stgi\n\t"
+				  _ASM_EXTABLE(1b, %l[fault])
+				  ::: "memory" : fault);
+fault:
+		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+	}
+}
+
 static void svm_emergency_disable(void)
 {
-	cpu_svm_disable();
+	kvm_cpu_svm_disable();
 }
 
 static void svm_hardware_disable(void)
@@ -598,7 +620,7 @@ static void svm_hardware_disable(void)
 	if (tsc_scaling)
 		__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 
-	cpu_svm_disable();
+	kvm_cpu_svm_disable();
 
 	amd_pmu_disable_virt();
 }