[RFC,V3,15/16] x86/sev: optimize system vector processing invoked from #HV exception
From: Ashish Kalra <ashish.kalra@amd.com>
Construct a system vector table and dispatch system vector exceptions
through sysvec_table from the #HV exception handler instead of calling
each system vector handler explicitly. The table is built dynamically at
init time from per-vector entries emitted into a new named ELF section.
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
---
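Not part of the patch, illustrative only: a minimal, self-contained
userspace sketch of the same named-section + dispatch-table pattern,
assuming a GNU toolchain (ld provides __start_<sec>/__stop_<sec> boundary
symbols for sections whose names are valid C identifiers). All demo_*
names, FIRST_DEMO_VECTOR and the "demo_vectors" section are hypothetical
stand-ins for the kernel's .system_vectors machinery:

/*
 * Hypothetical userspace demo, not kernel code: (vector, handler)
 * pairs are emitted into a named ELF section at build time, a
 * dispatch table is filled once from the linker-provided boundary
 * symbols, and dispatch becomes a single indexed call.
 */
#include <stdio.h>

#define FIRST_DEMO_VECTOR 0xec

/* Packed so consecutive entries are gap-free, like .byte + .quad. */
struct demo_entry {
	unsigned char vector;
	void (*func)(void);
} __attribute__((packed));

static void demo_timer(void)   { puts("timer");   }
static void demo_resched(void) { puts("resched"); }

/* Each use drops one entry into the "demo_vectors" section. */
#define DEMO_VECTOR(vec, fn)						\
	static const struct demo_entry __demo_entry_##fn		\
	__attribute__((used, section("demo_vectors"))) = { (vec), (fn) }

DEMO_VECTOR(0xec, demo_timer);
DEMO_VECTOR(0xed, demo_resched);

/* GNU ld provides these for sections whose names are C identifiers. */
extern const struct demo_entry __start_demo_vectors[];
extern const struct demo_entry __stop_demo_vectors[];

static void (*demo_table[16])(void);

int main(void)
{
	const struct demo_entry *p;

	/* Counterpart of construct_sysvec_table(). */
	for (p = __start_demo_vectors; p < __stop_demo_vectors; p++)
		demo_table[p->vector - FIRST_DEMO_VECTOR] = p->func;

	/* Counterpart of the indexed dispatch in do_exc_hv(). */
	demo_table[0xed - FIRST_DEMO_VECTOR]();	/* prints "resched" */
	return 0;
}

Built with plain gcc and run, this prints "resched"; the patch does the
same with the real sysvec handlers, replacing the hard-coded switch in
do_exc_hv() with a table lookup that picks up new system vectors
automatically.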
arch/x86/entry/entry_64.S | 6 +++
arch/x86/kernel/sev.c | 70 +++++++++++++----------------------
arch/x86/kernel/vmlinux.lds.S | 7 ++++
3 files changed, 38 insertions(+), 45 deletions(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
@@ -412,6 +412,12 @@ SYM_CODE_START(\asmsym)
_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
+ .if \vector >= FIRST_SYSTEM_VECTOR && \vector < NR_VECTORS
+ .section .system_vectors, "aw"
+ .byte \vector
+ .quad \cfunc
+ .previous
+ .endif
.endm
/*
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
@@ -151,6 +151,16 @@ struct sev_snp_runtime_data {
static DEFINE_PER_CPU(struct sev_snp_runtime_data*, snp_runtime_data);
+static void (*sysvec_table[NR_VECTORS - FIRST_SYSTEM_VECTOR])
+ (struct pt_regs *regs) __ro_after_init;
+
+struct sysvec_entry {
+ unsigned char vector;
+ void (*sysvec_func)(struct pt_regs *regs);
+} __packed;
+
+extern struct sysvec_entry __system_vectors[], __system_vectors_end[];
+
static inline u64 sev_es_rd_ghcb_msr(void)
{
return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
@@ -214,51 +224,11 @@ static void do_exc_hv(struct pt_regs *regs)
} else if (pending_events.vector == IA32_SYSCALL_VECTOR) {
WARN(1, "syscall shouldn't happen\n");
} else if (pending_events.vector >= FIRST_SYSTEM_VECTOR) {
- switch (pending_events.vector) {
-#if IS_ENABLED(CONFIG_HYPERV)
- case HYPERV_STIMER0_VECTOR:
- sysvec_hyperv_stimer0(regs);
- break;
- case HYPERVISOR_CALLBACK_VECTOR:
- sysvec_hyperv_callback(regs);
- break;
-#endif
-#ifdef CONFIG_SMP
- case RESCHEDULE_VECTOR:
- sysvec_reschedule_ipi(regs);
- break;
- case IRQ_MOVE_CLEANUP_VECTOR:
- sysvec_irq_move_cleanup(regs);
- break;
- case REBOOT_VECTOR:
- sysvec_reboot(regs);
- break;
- case CALL_FUNCTION_SINGLE_VECTOR:
- sysvec_call_function_single(regs);
- break;
- case CALL_FUNCTION_VECTOR:
- sysvec_call_function(regs);
- break;
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
- case ERROR_APIC_VECTOR:
- sysvec_error_interrupt(regs);
- break;
- case SPURIOUS_APIC_VECTOR:
- sysvec_spurious_apic_interrupt(regs);
- break;
- case LOCAL_TIMER_VECTOR:
- sysvec_apic_timer_interrupt(regs);
- break;
- case X86_PLATFORM_IPI_VECTOR:
- sysvec_x86_platform_ipi(regs);
- break;
-#endif
- case 0x0:
- break;
- default:
- panic("Unexpected vector %d\n", vector);
- unreachable();
+ if (!(sysvec_table[pending_events.vector - FIRST_SYSTEM_VECTOR])) {
+ WARN(1, "system vector entry 0x%x is NULL\n",
+ pending_events.vector);
+ } else {
+ (*sysvec_table[pending_events.vector - FIRST_SYSTEM_VECTOR])(regs);
}
} else {
common_interrupt(regs, pending_events.vector);
@@ -376,6 +346,14 @@ static bool sev_restricted_injection_enabled(void)
return sev_status & MSR_AMD64_SEV_RESTRICTED_INJECTION_ENABLED;
}
+static void __init construct_sysvec_table(void)
+{
+ struct sysvec_entry *p;
+
+ for (p = __system_vectors; p < __system_vectors_end; p++)
+ sysvec_table[p->vector - FIRST_SYSTEM_VECTOR] = p->sysvec_func;
+}
+
void __init sev_snp_init_hv_handling(void)
{
struct sev_snp_runtime_data *snp_data;
@@ -403,6 +381,8 @@ void __init sev_snp_init_hv_handling(void)
apic_set_eoi_write(hv_doorbell_apic_eoi_write);
local_irq_restore(flags);
+
+ construct_sysvec_table();
}
static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
@@ -339,6 +339,13 @@ SECTIONS
*(.altinstr_replacement)
}
+ . = ALIGN(8);
+ .system_vectors : AT(ADDR(.system_vectors) - LOAD_OFFSET) {
+ __system_vectors = .;
+ *(.system_vectors)
+ __system_vectors_end = .;
+ }
+
. = ALIGN(8);
.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
__apicdrivers = .;