@@ -158,6 +158,23 @@ static void vt_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmx_vcpu_reset(vcpu, init_event);
}
+static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ /* Unconditionally continue to vcpu_run(). */
+ return 1;
+
+ return vmx_vcpu_pre_run(vcpu);
+}
+
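+/* Enter TD guests through the TDX module; take the regular VMX path otherwise. */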
+static fastpath_t vt_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return tdx_vcpu_run(vcpu);
+
+ return vmx_vcpu_run(vcpu);
+}
+
static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
{
if (is_td_vcpu(vcpu)) {
@@ -343,8 +360,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.flush_tlb_gva = vt_flush_tlb_gva,
.flush_tlb_guest = vt_flush_tlb_guest,
- .vcpu_pre_run = vmx_vcpu_pre_run,
- .vcpu_run = vmx_vcpu_run,
+ .vcpu_pre_run = vt_vcpu_pre_run,
+ .vcpu_run = vt_vcpu_run,
.handle_exit = vmx_handle_exit,
.skip_emulated_instruction = vmx_skip_emulated_instruction,
.update_emulated_instruction = vmx_update_emulated_instruction,
@@ -11,6 +11,9 @@
#include "vmx.h"
#include "x86.h"
+#include <trace/events/kvm.h>
+#include "trace.h"
+
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -491,6 +494,87 @@ void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
*/
}
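+/*
+ * Enter/exit the guest TD.  This runs in the IRQs-off guest-entry section
+ * (guest_state_enter_irqoff()), so the function must remain noinstr.
+ */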
+static noinstr void tdx_vcpu_enter_exit(struct vcpu_tdx *tdx)
+{
+ struct tdx_module_args args;
+
+ /*
+ * Take struct vcpu_tdx rather than struct kvm_vcpu: to_tdx() uses
+ * KVM_VM_BUG(), which would cause a section mismatch in this noinstr
+ * function. The caller should do the to_tdx() conversion instead.
+ */
+ struct kvm_vcpu *vcpu = &tdx->vcpu;
+
+ guest_state_enter_irqoff();
+
+ /*
+ * TODO: optimizations:
+ * - Eliminate the copy between args and vcpu->arch.regs.
+ * - Copy registers in/out only when tdx->tdvmcall.regs_mask != 0,
+ *   i.e. when the exit is a TDG.VP.VMCALL.
+ */
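+ /*
+ * TDH.VP.ENTER takes the physical address of the TDVPR page in RCX;
+ * the remaining GPRs shuttle the guest's register state into and out
+ * of the TDX module.
+ */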
+ args = (struct tdx_module_args) {
+ .rcx = tdx->tdvpr_pa,
+#define REG(reg, REG) .reg = vcpu->arch.regs[VCPU_REGS_ ## REG]
+ REG(rdx, RDX),
+ REG(r8, R8),
+ REG(r9, R9),
+ REG(r10, R10),
+ REG(r11, R11),
+ REG(r12, R12),
+ REG(r13, R13),
+ REG(r14, R14),
+ REG(r15, R15),
+ REG(rbx, RBX),
+ REG(rdi, RDI),
+ REG(rsi, RSI),
+#undef REG
+ };
+
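+ /* Enter the guest; the SEAMCALL's return value is the exit reason. */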
+ tdx->exit_reason.full = __seamcall_saved_ret(TDH_VP_ENTER, &args);
+
+#define REG(reg, REG) vcpu->arch.regs[VCPU_REGS_ ## REG] = args.reg
+ REG(rcx, RCX);
+ REG(rdx, RDX);
+ REG(r8, R8);
+ REG(r9, R9);
+ REG(r10, R10);
+ REG(r11, R11);
+ REG(r12, R12);
+ REG(r13, R13);
+ REG(r14, R14);
+ REG(r15, R15);
+ REG(rbx, RBX);
+ REG(rdi, RDI);
+ REG(rsi, RSI);
+#undef REG
+
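+ /*
+ * TDX_SW_ERROR means the SEAMCALL itself failed.  That is unexpected
+ * unless the host is rebooting, when SEAMCALLs are known to fail.
+ */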
+ WARN_ON_ONCE(!kvm_rebooting &&
+ (tdx->exit_reason.full & TDX_SW_ERROR) == TDX_SW_ERROR);
+
+ guest_state_exit_irqoff();
+}
+
+fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+ /*
+ * fastpath_t is an enum, so returning -EINVAL here would be
+ * misinterpreted by the caller.  Flag the uninitialized vCPU as a
+ * non-recoverable error instead.
+ */
+ if (unlikely(!tdx->initialized)) {
+ tdx->exit_reason.full = TDX_NON_RECOVERABLE_VCPU;
+ return EXIT_FASTPATH_NONE;
+ }
+ if (unlikely(vcpu->kvm->vm_bugged)) {
+ tdx->exit_reason.full = TDX_NON_RECOVERABLE_VCPU;
+ return EXIT_FASTPATH_NONE;
+ }
+
+ trace_kvm_entry(vcpu);
+
+ tdx_vcpu_enter_exit(tdx);
+
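+ /*
+ * Registers in VMX_REGS_LAZY_LOAD_SET (RIP and RSP) are normally read
+ * lazily from the VMCS; their cached values are stale after
+ * TDH.VP.ENTER, so clear their "available" bits.
+ */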
+ vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
+ trace_kvm_exit(vcpu, KVM_ISA_VMX);
+
+ return EXIT_FASTPATH_NONE;
+}
+
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
{
WARN_ON_ONCE(root_hpa & ~PAGE_MASK);
@@ -27,6 +27,37 @@ struct kvm_tdx {
struct page *source_page;
};
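+/*
+ * Exit status returned by TDH.VP.ENTER.  Bits 31:0 mirror the VMX exit
+ * reason on a guest exit; bits 63:32 hold TDX-specific class and error
+ * information.
+ */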
+union tdx_exit_reason {
+ struct {
+ /* 31:0 mirror the VMX Exit Reason format */
+ u64 basic : 16;
+ u64 reserved16 : 1;
+ u64 reserved17 : 1;
+ u64 reserved18 : 1;
+ u64 reserved19 : 1;
+ u64 reserved20 : 1;
+ u64 reserved21 : 1;
+ u64 reserved22 : 1;
+ u64 reserved23 : 1;
+ u64 reserved24 : 1;
+ u64 reserved25 : 1;
+ u64 bus_lock_detected : 1;
+ u64 enclave_mode : 1;
+ u64 smi_pending_mtf : 1;
+ u64 smi_from_vmx_root : 1;
+ u64 reserved30 : 1;
+ u64 failed_vmentry : 1;
+
+ /* 63:32 are TDX specific */
+ u64 details_l1 : 8;
+ u64 class : 8;
+ u64 reserved61_48 : 14;
+ u64 non_recoverable : 1;
+ u64 error : 1;
+ };
+ u64 full;
+};
+
struct vcpu_tdx {
struct kvm_vcpu vcpu;
@@ -34,6 +65,8 @@ struct vcpu_tdx {
unsigned long *tdvpx_pa;
bool td_vcpu_created;
+ union tdx_exit_reason exit_reason;
+
bool initialized;
/*
@@ -150,6 +150,7 @@ int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
int tdx_vcpu_create(struct kvm_vcpu *vcpu);
void tdx_vcpu_free(struct kvm_vcpu *vcpu);
void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
+fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu);
u8 tdx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
@@ -184,6 +185,7 @@ static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOP
static inline int tdx_vcpu_create(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
static inline void tdx_vcpu_free(struct kvm_vcpu *vcpu) {}
static inline void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) {}
+static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu) { return EXIT_FASTPATH_NONE; }
static inline u8 tdx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { return 0; }
static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }