[RFC,V5,12/15] x86/sev: Add check for queued #HV events in IRQ enable and exit paths
From: Tianyu Lan <tiala@microsoft.com>
Add check_hv_pending() and check_hv_pending_irq_enable() to check for
#HV events that were queued while interrupts were disabled, and handle
them once interrupts are enabled again.
Signed-off-by: Tianyu Lan <tiala@microsoft.com>
---
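Not part of the commit message proper: a rough sketch of the call flow
this patch sets up, using only names from the hunks below (do_exc_hv()
is still an empty stub at this point):

  exception/IRQ exit                    sti paths
  (paranoid_exit, error_return)         (native_irq_enable, native_safe_halt)
           |                                     |
           v                                     v
  check_hv_pending(regs)               check_hv_pending_irq_enable()
    returns unless SEV-SNP guest         builds a synthetic pt_regs and
    and regs->flags has IF set           calls the handler with IRQs off
           |                                     |
           +-----------------+-------------------+
                             v
                      do_exc_hv(regs)
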
arch/x86/entry/entry_64.S | 18 ++++++++++++++++
arch/x86/include/asm/irqflags.h | 14 +++++++++++-
arch/x86/kernel/sev.c | 38 +++++++++++++++++++++++++++++++++
3 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1019,6 +1019,15 @@ SYM_CODE_END(paranoid_entry)
* R15 - old SPEC_CTRL
*/
SYM_CODE_START_LOCAL(paranoid_exit)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ /*
+ * If a #HV was delivered during execution and interrupts were
+ * disabled, then check if it can be handled before the iret
+ * (which may re-enable interrupts).
+ */
+ mov %rsp, %rdi
+ call check_hv_pending
+#endif
UNWIND_HINT_REGS
/*
@@ -1143,6 +1152,15 @@ SYM_CODE_START(error_entry)
SYM_CODE_END(error_entry)
SYM_CODE_START_LOCAL(error_return)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ /*
+ * If a #HV was delivered during execution and interrupts were
+ * disabled, then check if it can be handled before the iret
+ * (which may re-enable interrupts).
+ */
+ mov %rsp, %rdi
+ call check_hv_pending
+#endif
UNWIND_HINT_REGS
DEBUG_ENTRY_ASSERT_IRQS_OFF
testb $3, CS(%rsp)
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -11,6 +11,10 @@
/*
* Interrupt control:
*/
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+void check_hv_pending(struct pt_regs *regs);
+void check_hv_pending_irq_enable(void);
+#endif
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
@@ -40,12 +44,20 @@ static __always_inline void native_irq_disable(void)
static __always_inline void native_irq_enable(void)
{
asm volatile("sti": : :"memory");
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ check_hv_pending_irq_enable();
+#endif
}
static __always_inline void native_safe_halt(void)
{
mds_idle_clear_cpu_buffers();
- asm volatile("sti; hlt": : :"memory");
+ asm volatile("sti": : :"memory");
+
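+ /*
+ * Handle any #HV event queued while IRQs were off before executing
+ * hlt, so the vCPU does not go idle with an event still pending.
+ */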
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ check_hv_pending_irq_enable();
+#endif
+ asm volatile("hlt": : :"memory");
}
static __always_inline void native_halt(void)
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -181,6 +181,44 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}
+static void do_exc_hv(struct pt_regs *regs)
+{
+ /* Handle #HV exception. */
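+ /* Stub for now; actual event processing is expected in a later patch. */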
+}
+
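+/*
+ * Called from the paranoid_exit and error_return exit paths in
+ * entry_64.S with the stack frame's pt_regs. If the interrupted
+ * context had interrupts enabled, handle any queued #HV event before
+ * returning to it.
+ */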
+void check_hv_pending(struct pt_regs *regs)
+{
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return;
+
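+ /* Only handle the event if the interrupted context had IRQs enabled. */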
+ if ((regs->flags & X86_EFLAGS_IF) == 0)
+ return;
+
+ do_exc_hv(regs);
+}
+
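+/*
+ * Called right after interrupts are re-enabled in native_irq_enable()
+ * and native_safe_halt(). There is no exception frame here, so a
+ * synthetic pt_regs is built for do_exc_hv().
+ */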
+void check_hv_pending_irq_enable(void)
+{
+ struct pt_regs regs;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return;
+
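+ /* Fake a minimal register frame: current CS/SS and the saved flags. */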
+ memset(&regs, 0, sizeof(struct pt_regs));
+ asm volatile("movl %%cs, %%eax;" : "=a" (regs.cs));
+ asm volatile("movl %%ss, %%eax;" : "=a" (regs.ss));
+ regs.orig_ax = 0xffffffff;
+ regs.flags = native_save_fl();
+
+ /*
+ * Handle any pending #HV events with interrupts disabled, then
+ * re-enable interrupts.
+ */
+ asm volatile("cli" : : : "memory");
+ do_exc_hv(&regs);
+ asm volatile("sti" : : : "memory");
+}
+
void noinstr __sev_es_ist_exit(void)
{
unsigned long ist;