[v2,6/8] KVM: SVM: restore host save area from assembly
This is needed so that FILL_RETURN_BUFFER has access to the
percpu area via the GS segment base.  On x86-64, percpu accesses are
%gs-relative, and #VMEXIT does not reload the host's FS/GS state;
only the VMLOAD of the host save area restores GSBASE.  Do that
VMLOAD from assembly, before the RSB is stuffed, instead of from C
after __svm_vcpu_run() returns.
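
To illustrate with a minimal sketch (not part of the patch; the
percpu variable is hypothetical): any percpu access that runs between
#VMEXIT and the host-save-area VMLOAD reads through the guest's
GSBASE:

	/* Hypothetical example, not from this patch. */
	DECLARE_PER_CPU(unsigned long, demo_counter);

	static unsigned long read_demo_counter(void)
	{
		/*
		 * Compiles to roughly "mov %gs:demo_counter, %rax";
		 * executed before the host save area is VMLOADed,
		 * this goes through the guest's GS base.
		 */
		return this_cpu_read(demo_counter);
	}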
Cc: stable@vger.kernel.org
Fixes: f14eec0a3203 ("KVM: SVM: move more vmentry code to assembly")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Analyzed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/svm.c     |  3 +--
 arch/x86/kvm/svm/svm.h     |  2 +-
 arch/x86/kvm/svm/svm_ops.h |  5 -----
 arch/x86/kvm/svm/vmenter.S | 13 +++++++++++++
 4 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3922,8 +3922,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	} else {
 		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 
-		__svm_vcpu_run(svm);
-		vmload(__sme_page_pa(sd->save_area));
+		__svm_vcpu_run(svm, __sme_page_pa(sd->save_area));
 	}
 
 	guest_state_exit_irqoff();
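
The ordering problem, restated as a sketch of the old flow (comments
added here for illustration, not from the original code):

	/*
	 * Old flow (sketch): FILL_RETURN_BUFFER runs inside
	 * __svm_vcpu_run(), while GSBASE still holds the guest's
	 * value, because the host-save-area vmload only happened
	 * afterwards, back in C.
	 */
	__svm_vcpu_run(svm);                  /* ends with RSB stuffing */
	vmload(__sme_page_pa(sd->save_area)); /* too late for percpu users */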
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -684,6 +684,6 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 /* vmenter.S */
 
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm);
-void __svm_vcpu_run(struct vcpu_svm *svm);
+void __svm_vcpu_run(struct vcpu_svm *svm, unsigned long hsave_pa);
 
 #endif
diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h
--- a/arch/x86/kvm/svm/svm_ops.h
+++ b/arch/x86/kvm/svm/svm_ops.h
@@ -61,9 +61,4 @@ static __always_inline void vmsave(unsigned long pa)
 	svm_asm1(vmsave, "a" (pa), "memory");
 }
 
-static __always_inline void vmload(unsigned long pa)
-{
-	svm_asm1(vmload, "a" (pa), "memory");
-}
-
 #endif /* __KVM_X86_SVM_OPS_H */
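
For reference, the deleted helper was a thin wrapper around the
svm_asm1() macro, which pairs the instruction with an exception-table
entry and punts to kvm_spurious_fault() on a fault.  A sketch of the
expansion (reconstructed from the shape of svm_asm1(), not text from
this patch):

	static __always_inline void vmload(unsigned long pa)
	{
		asm goto("1: vmload %0\n\t"
			 _ASM_EXTABLE(1b, %l[fault])
			 :: "a" (pa) : "memory" : fault);
		return;
	fault:
		kvm_spurious_fault();
	}

The assembly version below must reproduce this fault handling by
hand; that is what the new 7:/8:/70: labels and the extra
_ASM_EXTABLE entry are for.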
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -35,6 +35,7 @@
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
  * @svm:	struct vcpu_svm *
+ * @hsave_pa:	unsigned long, physical address of the host save area
  */
 SYM_FUNC_START(__svm_vcpu_run)
 	push %_ASM_BP
@@ -49,6 +50,9 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
 	push %_ASM_BX
 
+	/* @hsave_pa is needed last, after the vmexit, so push it first. */
+	push %_ASM_ARG2
+
 	/* Save @svm. */
 	push %_ASM_ARG1
 
@@ -124,6 +128,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 5:	vmsave %_ASM_AX
 6:
 
+	/* Pop @hsave_pa and restore GSBASE, allowing access to percpu data. */
+	pop %_ASM_AX
+7:	vmload %_ASM_AX
+8:
+
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
@@ -187,10 +196,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 50:	cmpb $0, kvm_rebooting
 	jne 6b
 	ud2
+70:	cmpb $0, kvm_rebooting
+	jne 8b
+	ud2
 
 	_ASM_EXTABLE(1b, 10b)
 	_ASM_EXTABLE(3b, 30b)
 	_ASM_EXTABLE(5b, 50b)
+	_ASM_EXTABLE(7b, 70b)
 
 SYM_FUNC_END(__svm_vcpu_run)
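
The new fixup follows the same protocol as the existing 10:/30:/50:
handlers.  An annotated sketch of the added lines (comments are
illustrative, not part of the patch):

	pop %_ASM_AX            /* @hsave_pa, pushed on function entry */
7:	vmload %_ASM_AX         /* reload host FS/GS/TR/LDTR state     */
8:	                        /* GSBASE valid for percpu from here   */
	...
70:	cmpb $0, kvm_rebooting  /* fixup target if the vmload faults   */
	jne 8b                  /* tolerate the fault during reboot    */
	ud2                     /* otherwise it is a hard error        */

	_ASM_EXTABLE(7b, 70b)   /* route a fault at 7: to 70:          */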