[3/8] KVM: SVM: Drop 32-bit "support" from __svm_sev_es_vcpu_run()

Message ID: 20240223204233.3337324-4-seanjc@google.com
State: New
Series: KVM: SVM: Clean up VMRUN=>#VMEXIT assembly

Commit Message

Sean Christopherson Feb. 23, 2024, 8:42 p.m. UTC
Drop 32-bit "support" from __svm_sev_es_vcpu_run(), as SEV/SEV-ES is firmly
64-bit only.  The "support" was purely the result of bad copy+paste from
__svm_vcpu_run(), which in turn was slightly less bad copy+paste from
__vmx_vcpu_run().

Opportunistically convert to unadulterated register accesses so that it's
easier (but still not easy) to follow which registers hold what arguments,
and when.
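
(Side note for reviewers: the %_ASM_* names being removed come from
arch/x86/include/asm/asm.h, which picks 32-bit or 64-bit register names at
build time.  A simplified sketch of how they expand; the in-tree
definitions additionally wrap the names in __ASM_FORM() and friends so
they work from both C and assembly:

	#ifdef CONFIG_X86_64
	# define __ASM_SEL(a, b)	b	/* 64-bit: r-prefixed name */
	#else
	# define __ASM_SEL(a, b)	a	/* 32-bit: e-prefixed name */
	#endif

	#define __ASM_REG(reg)		__ASM_SEL(e##reg, r##reg)

	#define _ASM_AX			__ASM_REG(ax)	/* eax or rax */
	#define _ASM_BX			__ASM_REG(bx)	/* ebx or rbx */
	#define _ASM_BP			__ASM_REG(bp)	/* ebp or rbp */
	#define _ASM_DI			__ASM_REG(di)	/* edi or rdi */

so "%_ASM_AX" in the .S file assembles to %eax or %rax depending on the
build, which is exactly the indirection this patch no longer needs.)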

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/vmenter.S | 44 +++++++++++---------------------------
 1 file changed, 13 insertions(+), 31 deletions(-)
  

Patch

diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 7ee363d7517c..0026b4a56d25 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -298,17 +298,12 @@  SYM_FUNC_END(__svm_vcpu_run)
  * @spec_ctrl_intercepted: bool
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
-	push %_ASM_BP
-#ifdef CONFIG_X86_64
+	push %rbp
 	push %r15
 	push %r14
 	push %r13
 	push %r12
-#else
-	push %edi
-	push %esi
-#endif
-	push %_ASM_BX
+	push %rbx
 
 	/*
 	 * Save variables needed after vmexit on the stack, in inverse
@@ -316,39 +311,31 @@  SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	 */
 
 	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
-	push %_ASM_ARG2
+	push %rsi
 
 	/* Save @svm. */
-	push %_ASM_ARG1
-
-.ifnc _ASM_ARG1, _ASM_DI
-	/*
-	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
-	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
-	 */
-	mov %_ASM_ARG1, %_ASM_DI
-.endif
+	push %rdi
 
 	/* Clobbers RAX, RCX, RDX.  */
 	RESTORE_GUEST_SPEC_CTRL
 
 	/* Get svm->current_vmcb->pa into RAX. */
-	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
-	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+	mov SVM_current_vmcb(%rdi), %rax
+	mov KVM_VMCB_pa(%rax), %rax
 
 	/* Enter guest mode */
 	sti
 
-1:	vmrun %_ASM_AX
+1:	vmrun %rax
 
 2:	cli
 
 	/* Pop @svm to RDI, guest registers have been saved already. */
-	pop %_ASM_DI
+	pop %rdi
 
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
 
 	/* Clobbers RAX, RCX, RDX.  */
@@ -364,26 +351,21 @@  SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	UNTRAIN_RET_VM
 
 	/* "Pop" @spec_ctrl_intercepted.  */
-	pop %_ASM_BX
+	pop %rbx
 
-	pop %_ASM_BX
+	pop %rbx
 
-#ifdef CONFIG_X86_64
 	pop %r12
 	pop %r13
 	pop %r14
 	pop %r15
-#else
-	pop %esi
-	pop %edi
-#endif
-	pop %_ASM_BP
+	pop %rbp
 	RET
 
 	RESTORE_GUEST_SPEC_CTRL_BODY
 	RESTORE_HOST_SPEC_CTRL_BODY
 
-3:	cmpb $0, _ASM_RIP(kvm_rebooting)
+3:	cmpb $0, kvm_rebooting(%rip)
 	jne 2b
 	ud2
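
For reference, and not part of the patch itself: __svm_sev_es_vcpu_run()
is declared in arch/x86/kvm/svm/svm.h; a sketch of the prototype, matching
the @svm/@spec_ctrl_intercepted comment above:

	/* Sketch of the declaration from arch/x86/kvm/svm/svm.h. */
	void __svm_sev_es_vcpu_run(struct vcpu_svm *svm,
				   bool spec_ctrl_intercepted);

Because the x86-64 SysV calling convention passes the first two integer
arguments in RDI and RSI, @svm arrives in RDI and @spec_ctrl_intercepted
in RSI.  That is why the rewritten prologue can save them with plain
"push %rsi" / "push %rdi", and why the old ".ifnc _ASM_ARG1, _ASM_DI"
stash is dead weight on 64-bit: RDI already holds @svm by the time
SVM_current_vmcb(%rdi) is dereferenced, no mov required.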