@@ -149,6 +149,7 @@ KVM_X86_OP_OPTIONAL(migrate_timers)
KVM_X86_OP(msr_filter_changed)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
+KVM_X86_OP(vcpu_deliver_init)
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
KVM_X86_OP_OPTIONAL(get_untagged_addr)
KVM_X86_OP_OPTIONAL_RET0(gmem_max_level)
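Adding KVM_X86_OP(vcpu_deliver_init) here is what creates the static call that
lapic.c invokes below. Each entry in kvm-x86-ops.h is expanded at multiple
include sites by redefining the macros; a condensed sketch of that plumbing,
based on kvm_host.h and x86.c (exact form varies by tree):

/* In kvm_host.h: declare one static call key per vendor hook. */
#define KVM_X86_OP(func) \
	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
#include <asm/kvm-x86-ops.h>

/* In x86.c: define the keys, NULL until a vendor module registers its ops. */
#define KVM_X86_OP(func) \
	DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
				*(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>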
@@ -1836,6 +1836,7 @@ struct kvm_x86_ops {
int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
+ void (*vcpu_deliver_init)(struct kvm_vcpu *vcpu);

/*
* Returns vCPU specific APICv inhibit reasons
*/
@@ -2092,6 +2093,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
+void kvm_vcpu_deliver_init(struct kvm_vcpu *vcpu);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
int reason, bool has_error_code, u32 error_code);
@@ -3268,6 +3268,16 @@ int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
return 0;
}

+void kvm_vcpu_deliver_init(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_reset(vcpu, true);
+ if (kvm_vcpu_is_bsp(vcpu))
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ else
+ vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_init);
+
int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
@@ -3299,13 +3309,8 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
return 0;
}

- if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
- kvm_vcpu_reset(vcpu, true);
- if (kvm_vcpu_is_bsp(apic->vcpu))
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
- else
- vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
- }
+ if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events))
+ static_call(kvm_x86_vcpu_deliver_init)(vcpu);
if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
/* evaluate pending_events before reading the vector */
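For context, the pending_events bits consumed above are produced when an INIT
or SIPI IPI is delivered to the local APIC. A condensed sketch of that path,
modeled on __apic_accept_irq() in lapic.c (details vary by tree; result
bookkeeping and tracing omitted):

case APIC_DM_INIT:
	if (!trig_mode || level) {
		/* Latch INIT for kvm_apic_accept_events() to consume. */
		apic->pending_events = (1UL << KVM_APIC_INIT);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
	}
	break;

case APIC_DM_STARTUP:
	/* Publish the vector before setting the SIPI event bit. */
	apic->sipi_vector = vector;
	smp_wmb();
	set_bit(KVM_APIC_SIPI, &apic->pending_events);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_vcpu_kick(vcpu);
	break;

The smp_wmb() here pairs with the "evaluate pending_events before reading the
vector" ordering noted in the hunk above.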
@@ -5037,6 +5037,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.complete_emulated_msr = svm_complete_emulated_msr,

.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
+ .vcpu_deliver_init = kvm_vcpu_deliver_init,
.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
};
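SVM needs no INIT quirks, so it wires the common kvm_vcpu_deliver_init()
helper straight into the new hook. The ops table reaches the static_call()
site in lapic.c when the vendor module registers itself; condensed from
kvm_ops_update() in x86.c (exact form varies by tree):

static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
{
	memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));

#define __KVM_X86_OP(func) \
	static_call_update(kvm_x86_##func, kvm_x86_ops.func);
#define KVM_X86_OP(func) \
	WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
#define KVM_X86_OP_OPTIONAL __KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0(func) \
	static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
					   (void *)__static_call_return0);
#include <asm/kvm-x86-ops.h>
#undef __KVM_X86_OP
}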
@@ -320,6 +320,14 @@ static void vt_enable_smi_window(struct kvm_vcpu *vcpu)
}
#endif

+static bool vt_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu))
+ return true;
+
+ return vmx_apic_init_signal_blocked(vcpu);
+}
+
static void vt_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
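For reference, one consumer of apic_init_signal_blocked in the common LAPIC
code, roughly as it appears in lapic.h of recent trees (a sketch from memory;
older trees open-code the check in kvm_apic_accept_events()):

static inline bool kvm_apic_init_sipi_allowed(struct kvm_vcpu *vcpu)
{
	return !is_smm(vcpu) &&
	       !static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
}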
@@ -348,6 +356,25 @@ static void vt_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
vmx_deliver_interrupt(apic, delivery_mode, trig_mode, vector);
}

+static void vt_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+ if (is_td_vcpu(vcpu))
+ return;
+
+ kvm_vcpu_deliver_sipi_vector(vcpu, vector);
+}
+
+static void vt_vcpu_deliver_init(struct kvm_vcpu *vcpu)
+{
+ if (is_td_vcpu(vcpu)) {
+ /* TDX doesn't support INIT; silently ignore the INIT event. */
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ return;
+ }
+
+ kvm_vcpu_deliver_init(vcpu);
+}
+
static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
{
if (is_td_vcpu(vcpu)) {
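All of these vt_* wrappers branch on the same predicate. A sketch of it as
defined by the TDX series in vmx/tdx.h; the KVM_X86_TDX_VM vm_type check is
an assumption from that series and may differ across revisions:

static inline bool is_td(struct kvm *kvm)
{
	return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

static inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
{
	return is_td(vcpu->kvm);
}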
@@ -744,13 +771,14 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
#endif

.check_emulate_instruction = vmx_check_emulate_instruction,

- .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
+ .apic_init_signal_blocked = vt_apic_init_signal_blocked,
.migrate_timers = vmx_migrate_timers,

.msr_filter_changed = vt_msr_filter_changed,
.complete_emulated_msr = kvm_complete_insn_gp,

- .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+ .vcpu_deliver_sipi_vector = vt_vcpu_deliver_sipi_vector,
+ .vcpu_deliver_init = vt_vcpu_deliver_init,

.get_untagged_addr = vmx_get_untagged_addr,
@@ -769,8 +769,8 @@ void tdx_vcpu_free(struct kvm_vcpu *vcpu)
void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
- /* Ignore INIT silently because TDX doesn't support INIT event. */
- if (init_event)
+ /* The vcpu_deliver_init method silently discards INIT events. */
+ if (KVM_BUG_ON(init_event, vcpu->kvm))
return;

if (KVM_BUG_ON(is_td_vcpu_created(to_tdx(vcpu)), vcpu->kvm))
return;
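The switch from a silent early return to KVM_BUG_ON() is deliberate: now that
vcpu_deliver_init filters INIT for TDs, an init_event reaching tdx_vcpu_reset()
would mean a KVM bug, so the VM is marked dead instead of papering over it.
Roughly, condensed from include/linux/kvm_host.h (kvm_vm_bugged() kills the VM
and kicks all vCPUs out to userspace):

#define KVM_BUG_ON(cond, kvm)					\
({								\
	bool __ret = !!(cond);					\
								\
	if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})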