[v10,039/108] KVM: VMX: Introduce test mode related to EPT violation VE

Message ID 8b3101711c5291246653efb50cc2975863d3a8ab.1667110240.git.isaku.yamahata@intel.com
State New
Series: KVM TDX basic feature support

Commit Message

Isaku Yamahata Oct. 30, 2022, 6:22 a.m. UTC
  From: Isaku Yamahata <isaku.yamahata@intel.com>

To support TDX, KVM is enhanced to operate with #VE.  For TDX, KVM
programs the CPU to conditionally inject #VE and sets the #VE suppress
bit in EPT entries.  In the VMX case, #VE isn't used; if a #VE occurs
for a VMX guest, it's a bug.  To be defensive (i.e. to test that the
VMX case isn't broken), introduce the module option
ept_violation_ve_test; when it's set, intercept unexpected #VE and
treat it as an error.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
 arch/x86/include/asm/vmx.h | 12 +++++++
 arch/x86/kvm/vmx/vmcs.h    |  5 +++
 arch/x86/kvm/vmx/vmx.c     | 69 +++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/vmx/vmx.h     |  6 +++-
 4 files changed, 90 insertions(+), 2 deletions(-)
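 
As background for the commit message above — a minimal illustrative
sketch, not part of the patch: the Intel SDM defines bit 63 of an EPT
entry as the "suppress #VE" bit.  When the EPT-violation #VE secondary
execution control is enabled, hardware converts an eligible (convertible)
EPT violation into a guest #VE only if that bit is clear in the faulting
entry.  TDX relies on this; for plain VMX, KVM leaves #VE unused.  The
helper name below is hypothetical.

	/* Hypothetical helper for illustration only. */
	#define EPT_SUPPRESS_VE_BIT	(1ULL << 63)

	/*
	 * True if an EPT violation on this leaf entry would be morphed
	 * into a guest #VE rather than a VM exit (assuming the
	 * EPT-violation #VE control is on and the violation is otherwise
	 * convertible).
	 */
	static bool ept_violation_becomes_ve(u64 ept_entry)
	{
		return !(ept_entry & EPT_SUPPRESS_VE_BIT);
	}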
  

Comments

Binbin Wu Nov. 3, 2022, 1:41 p.m. UTC | #1
On 2022/10/30 14:22, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
>
> To support TDX, KVM is enhanced to operate with #VE.  For TDX, KVM
> programs the CPU to conditionally inject #VE and sets the #VE suppress
> bit in EPT entries.  In the VMX case, #VE isn't used; if a #VE occurs
> for a VMX guest, it's a bug.  To be defensive (i.e. to test that the
> VMX case isn't broken), introduce the module option
> ept_violation_ve_test; when it's set, intercept unexpected #VE and
> treat it as an error.
>
> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
>   arch/x86/include/asm/vmx.h | 12 +++++++
>   arch/x86/kvm/vmx/vmcs.h    |  5 +++
>   arch/x86/kvm/vmx/vmx.c     | 69 +++++++++++++++++++++++++++++++++++++-
>   arch/x86/kvm/vmx/vmx.h     |  6 +++-
>   4 files changed, 90 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
> index cdbf12c1a83c..752d53652007 100644
> --- a/arch/x86/include/asm/vmx.h
> +++ b/arch/x86/include/asm/vmx.h
> @@ -68,6 +68,7 @@
>   #define SECONDARY_EXEC_ENCLS_EXITING		VMCS_CONTROL_BIT(ENCLS_EXITING)
>   #define SECONDARY_EXEC_RDSEED_EXITING		VMCS_CONTROL_BIT(RDSEED_EXITING)
>   #define SECONDARY_EXEC_ENABLE_PML               VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
> +#define SECONDARY_EXEC_EPT_VIOLATION_VE		VMCS_CONTROL_BIT(EPT_VIOLATION_VE)
>   #define SECONDARY_EXEC_PT_CONCEAL_VMX		VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
>   #define SECONDARY_EXEC_XSAVES			VMCS_CONTROL_BIT(XSAVES)
>   #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC	VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
> @@ -223,6 +224,8 @@ enum vmcs_field {
>   	VMREAD_BITMAP_HIGH              = 0x00002027,
>   	VMWRITE_BITMAP                  = 0x00002028,
>   	VMWRITE_BITMAP_HIGH             = 0x00002029,
> +	VE_INFORMATION_ADDRESS		= 0x0000202A,
> +	VE_INFORMATION_ADDRESS_HIGH	= 0x0000202B,
>   	XSS_EXIT_BITMAP                 = 0x0000202C,
>   	XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
>   	ENCLS_EXITING_BITMAP		= 0x0000202E,
> @@ -628,4 +631,13 @@ enum vmx_l1d_flush_state {
>   
>   extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
>   
> +struct vmx_ve_information {
> +	u32 exit_reason;
> +	u32 delivery;
> +	u64 exit_qualification;
> +	u64 guest_linear_address;
> +	u64 guest_physical_address;
> +	u16 eptp_index;
> +};
> +
>   #endif
> diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
> index ac290a44a693..9277676057a7 100644
> --- a/arch/x86/kvm/vmx/vmcs.h
> +++ b/arch/x86/kvm/vmx/vmcs.h
> @@ -140,6 +140,11 @@ static inline bool is_nm_fault(u32 intr_info)
>   	return is_exception_n(intr_info, NM_VECTOR);
>   }
>   
> +static inline bool is_ve_fault(u32 intr_info)
> +{
> +	return is_exception_n(intr_info, VE_VECTOR);
> +}
> +
>   /* Undocumented: icebp/int1 */
>   static inline bool is_icebp(u32 intr_info)
>   {
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index f890191e8580..dd3fde9d3c32 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -126,6 +126,9 @@ module_param(error_on_inconsistent_vmcs_config, bool, 0444);
>   static bool __read_mostly dump_invalid_vmcs = 0;
>   module_param(dump_invalid_vmcs, bool, 0644);
>   
> +static bool __read_mostly ept_violation_ve_test;
> +module_param(ept_violation_ve_test, bool, 0444);
> +
>   #define MSR_BITMAP_MODE_X2APIC		1
>   #define MSR_BITMAP_MODE_X2APIC_APICV	2
>   
> @@ -783,6 +786,13 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
>   
>   	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
>   	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
> +	/*
> +	 * #VE isn't used for VMX, only for TDX.  To detect unexpected
> +	 * #VE-related breakage of the VMX case, intercept unexpected #VE
> +	 * and warn on it.
> +	 */
> +	if (ept_violation_ve_test)
> +		eb |= 1u << VE_VECTOR;
>   	/*
>   	 * Guest access to VMware backdoor ports could legitimately
>   	 * trigger #GP because of TSS I/O permission bitmap.
> @@ -2644,6 +2654,9 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
>   					&_cpu_based_2nd_exec_control))
>   			return -EIO;
>   	}
> +	if (!ept_violation_ve_test)
> +		_cpu_based_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;

should be _cpu_based_2nd_exec_control


> +
>   #ifndef CONFIG_X86_64
>   	if (!(_cpu_based_2nd_exec_control &
>   				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
> @@ -2668,6 +2681,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
>   			return -EIO;
>   
>   		vmx_cap->ept = 0;
> +		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
>   	}
>   	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
>   	    vmx_cap->vpid) {
> @@ -4510,6 +4524,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
>   		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
>   	if (!enable_ept) {
>   		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
> +		exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
>   		enable_unrestricted_guest = 0;
>   	}
>   	if (!enable_unrestricted_guest)
> @@ -4637,8 +4652,40 @@ static void init_vmcs(struct vcpu_vmx *vmx)
>   
>   	exec_controls_set(vmx, vmx_exec_control(vmx));
>   
> -	if (cpu_has_secondary_exec_ctrls())
> +	if (cpu_has_secondary_exec_ctrls()) {
>   		secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
> +		if (secondary_exec_controls_get(vmx) &
> +		    SECONDARY_EXEC_EPT_VIOLATION_VE) {
> +			if (!vmx->ve_info) {
> +				/* ve_info must be page aligned. */
> +				struct page *page;
> +
> +				BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
> +				page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
> +				if (page)
> +					vmx->ve_info = page_to_virt(page);
> +			}
> +			if (vmx->ve_info) {
> +				/*
> +				 * Allow #VE delivery. CPU sets this field to
> +				 * 0xFFFFFFFF on #VE delivery.  Another #VE can
> +				 * occur only if software clears the field.
> +				 */
> +				vmx->ve_info->delivery = 0;
> +				vmcs_write64(VE_INFORMATION_ADDRESS,
> +					     __pa(vmx->ve_info));
> +			} else {
> +				/*
> +				 * Because SECONDARY_EXEC_EPT_VIOLATION_VE is
> +				 * used only when ept_violation_ve_test is true,
> +				 * it's okay to go with the bit disabled.
> +				 */
> +				pr_err("Failed to allocate ve_info; disabling EPT_VIOLATION_VE.\n");
> +				secondary_exec_controls_clearbit(vmx,
> +								 SECONDARY_EXEC_EPT_VIOLATION_VE);
> +			}
> +		}
> +	}
>   
>   	if (cpu_has_tertiary_exec_ctrls())
>   		tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
> @@ -5118,6 +5165,12 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
>   	if (is_invalid_opcode(intr_info))
>   		return handle_ud(vcpu);
>   
> +	/*
> +	 * #VE isn't supposed to happen for VMX guests.  If one occurs
> +	 * anyway, it's a KVM bug: warn and bail out of the guest with
> +	 * an internal error.
> +	 */
> +	if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm))
> +		return -EIO;
> +
>   	error_code = 0;
>   	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
>   		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
> @@ -6306,6 +6359,18 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
>   	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
>   		pr_err("Virtual processor ID = 0x%04x\n",
>   		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
> +	if (secondary_exec_control & SECONDARY_EXEC_EPT_VIOLATION_VE) {
> +		struct vmx_ve_information *ve_info;
> +
> +		pr_err("VE info address = 0x%016llx\n",
> +		       vmcs_read64(VE_INFORMATION_ADDRESS));
> +		ve_info = __va(vmcs_read64(VE_INFORMATION_ADDRESS));
> +		pr_err("ve_info: 0x%08x 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%04x\n",
> +		       ve_info->exit_reason, ve_info->delivery,
> +		       ve_info->exit_qualification,
> +		       ve_info->guest_linear_address,
> +		       ve_info->guest_physical_address, ve_info->eptp_index);
> +	}
>   }
>   
>   /*
> @@ -7302,6 +7367,8 @@ void vmx_vcpu_free(struct kvm_vcpu *vcpu)
>   	free_vpid(vmx->vpid);
>   	nested_vmx_free_vcpu(vcpu);
>   	free_loaded_vmcs(vmx->loaded_vmcs);
> +	if (vmx->ve_info)
> +		free_page((unsigned long)vmx->ve_info);
>   }
>   
>   int vmx_vcpu_create(struct kvm_vcpu *vcpu)
> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
> index d49d0ace9fb8..1813caeb24d8 100644
> --- a/arch/x86/kvm/vmx/vmx.h
> +++ b/arch/x86/kvm/vmx/vmx.h
> @@ -359,6 +359,9 @@ struct vcpu_vmx {
>   		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
>   		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
>   	} shadow_msr_intercept;
> +
> +	/* ve_info must be page aligned. */
> +	struct vmx_ve_information *ve_info;
>   };
>   
>   struct kvm_vmx {
> @@ -570,7 +573,8 @@ static inline u8 vmx_get_rvi(void)
>   	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
>   	 SECONDARY_EXEC_BUS_LOCK_DETECTION |				\
>   	 SECONDARY_EXEC_NOTIFY_VM_EXITING |				\
> -	 SECONDARY_EXEC_ENCLS_EXITING)
> +	 SECONDARY_EXEC_ENCLS_EXITING |					\
> +	 SECONDARY_EXEC_EPT_VIOLATION_VE)
>   
>   #define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
>   #define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL			\
  
Isaku Yamahata Nov. 3, 2022, 8:13 p.m. UTC | #2
On Thu, Nov 03, 2022 at 09:41:44PM +0800,
Binbin Wu <binbin.wu@linux.intel.com> wrote:

> 
> On 2022/10/30 14:22, isaku.yamahata@intel.com wrote:
> > From: Isaku Yamahata <isaku.yamahata@intel.com>
> > 
> > To support TDX, KVM is enhanced to operate with #VE.  For TDX, KVM
> > programs the CPU to conditionally inject #VE and sets the #VE suppress
> > bit in EPT entries.  In the VMX case, #VE isn't used; if a #VE occurs
> > for a VMX guest, it's a bug.  To be defensive (i.e. to test that the
> > VMX case isn't broken), introduce the module option
> > ept_violation_ve_test; when it's set, intercept unexpected #VE and
> > treat it as an error.
> > 
> > Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> > Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> > ---
> >   arch/x86/include/asm/vmx.h | 12 +++++++
> >   arch/x86/kvm/vmx/vmcs.h    |  5 +++
> >   arch/x86/kvm/vmx/vmx.c     | 69 +++++++++++++++++++++++++++++++++++++-
> >   arch/x86/kvm/vmx/vmx.h     |  6 +++-
> >   4 files changed, 90 insertions(+), 2 deletions(-)
> > 
> > diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
> > index cdbf12c1a83c..752d53652007 100644
> > --- a/arch/x86/include/asm/vmx.h
> > +++ b/arch/x86/include/asm/vmx.h
> > @@ -68,6 +68,7 @@
> >   #define SECONDARY_EXEC_ENCLS_EXITING		VMCS_CONTROL_BIT(ENCLS_EXITING)
> >   #define SECONDARY_EXEC_RDSEED_EXITING		VMCS_CONTROL_BIT(RDSEED_EXITING)
> >   #define SECONDARY_EXEC_ENABLE_PML               VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
> > +#define SECONDARY_EXEC_EPT_VIOLATION_VE		VMCS_CONTROL_BIT(EPT_VIOLATION_VE)
> >   #define SECONDARY_EXEC_PT_CONCEAL_VMX		VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
> >   #define SECONDARY_EXEC_XSAVES			VMCS_CONTROL_BIT(XSAVES)
> >   #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC	VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
> > @@ -223,6 +224,8 @@ enum vmcs_field {
> >   	VMREAD_BITMAP_HIGH              = 0x00002027,
> >   	VMWRITE_BITMAP                  = 0x00002028,
> >   	VMWRITE_BITMAP_HIGH             = 0x00002029,
> > +	VE_INFORMATION_ADDRESS		= 0x0000202A,
> > +	VE_INFORMATION_ADDRESS_HIGH	= 0x0000202B,
> >   	XSS_EXIT_BITMAP                 = 0x0000202C,
> >   	XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
> >   	ENCLS_EXITING_BITMAP		= 0x0000202E,
> > @@ -628,4 +631,13 @@ enum vmx_l1d_flush_state {
> >   extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
> > +struct vmx_ve_information {
> > +	u32 exit_reason;
> > +	u32 delivery;
> > +	u64 exit_qualification;
> > +	u64 guest_linear_address;
> > +	u64 guest_physical_address;
> > +	u16 eptp_index;
> > +};
> > +
> >   #endif
> > diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
> > index ac290a44a693..9277676057a7 100644
> > --- a/arch/x86/kvm/vmx/vmcs.h
> > +++ b/arch/x86/kvm/vmx/vmcs.h
> > @@ -140,6 +140,11 @@ static inline bool is_nm_fault(u32 intr_info)
> >   	return is_exception_n(intr_info, NM_VECTOR);
> >   }
> > +static inline bool is_ve_fault(u32 intr_info)
> > +{
> > +	return is_exception_n(intr_info, VE_VECTOR);
> > +}
> > +
> >   /* Undocumented: icebp/int1 */
> >   static inline bool is_icebp(u32 intr_info)
> >   {
> > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> > index f890191e8580..dd3fde9d3c32 100644
> > --- a/arch/x86/kvm/vmx/vmx.c
> > +++ b/arch/x86/kvm/vmx/vmx.c
> > @@ -126,6 +126,9 @@ module_param(error_on_inconsistent_vmcs_config, bool, 0444);
> >   static bool __read_mostly dump_invalid_vmcs = 0;
> >   module_param(dump_invalid_vmcs, bool, 0644);
> > +static bool __read_mostly ept_violation_ve_test;
> > +module_param(ept_violation_ve_test, bool, 0444);
> > +
> >   #define MSR_BITMAP_MODE_X2APIC		1
> >   #define MSR_BITMAP_MODE_X2APIC_APICV	2
> > @@ -783,6 +786,13 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
> >   	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
> >   	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
> > +	/*
> > +	 * #VE isn't used for VMX, only for TDX.  To detect unexpected
> > +	 * #VE-related breakage of the VMX case, intercept unexpected #VE
> > +	 * and warn on it.
> > +	 */
> > +	if (ept_violation_ve_test)
> > +		eb |= 1u << VE_VECTOR;
> >   	/*
> >   	 * Guest access to VMware backdoor ports could legitimately
> >   	 * trigger #GP because of TSS I/O permission bitmap.
> > @@ -2644,6 +2654,9 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
> >   					&_cpu_based_2nd_exec_control))
> >   			return -EIO;
> >   	}
> > +	if (!ept_violation_ve_test)
> > +		_cpu_based_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
> 
> should be _cpu_based_2nd_exec_control

Oops, thanks.
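
For reference, the acknowledged fix amounts to masking the bit out of the
secondary control word instead — a hypothetical hunk for the next
revision, following Binbin's pointer:

	if (!ept_violation_ve_test)
		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;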
  

Patch

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index cdbf12c1a83c..752d53652007 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -68,6 +68,7 @@ 
 #define SECONDARY_EXEC_ENCLS_EXITING		VMCS_CONTROL_BIT(ENCLS_EXITING)
 #define SECONDARY_EXEC_RDSEED_EXITING		VMCS_CONTROL_BIT(RDSEED_EXITING)
 #define SECONDARY_EXEC_ENABLE_PML               VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
+#define SECONDARY_EXEC_EPT_VIOLATION_VE		VMCS_CONTROL_BIT(EPT_VIOLATION_VE)
 #define SECONDARY_EXEC_PT_CONCEAL_VMX		VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
 #define SECONDARY_EXEC_XSAVES			VMCS_CONTROL_BIT(XSAVES)
 #define SECONDARY_EXEC_MODE_BASED_EPT_EXEC	VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
@@ -223,6 +224,8 @@  enum vmcs_field {
 	VMREAD_BITMAP_HIGH              = 0x00002027,
 	VMWRITE_BITMAP                  = 0x00002028,
 	VMWRITE_BITMAP_HIGH             = 0x00002029,
+	VE_INFORMATION_ADDRESS		= 0x0000202A,
+	VE_INFORMATION_ADDRESS_HIGH	= 0x0000202B,
 	XSS_EXIT_BITMAP                 = 0x0000202C,
 	XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
 	ENCLS_EXITING_BITMAP		= 0x0000202E,
@@ -628,4 +631,13 @@  enum vmx_l1d_flush_state {
 
 extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
 
+struct vmx_ve_information {
+	u32 exit_reason;
+	u32 delivery;
+	u64 exit_qualification;
+	u64 guest_linear_address;
+	u64 guest_physical_address;
+	u16 eptp_index;
+};
+
 #endif
diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
index ac290a44a693..9277676057a7 100644
--- a/arch/x86/kvm/vmx/vmcs.h
+++ b/arch/x86/kvm/vmx/vmcs.h
@@ -140,6 +140,11 @@  static inline bool is_nm_fault(u32 intr_info)
 	return is_exception_n(intr_info, NM_VECTOR);
 }
 
+static inline bool is_ve_fault(u32 intr_info)
+{
+	return is_exception_n(intr_info, VE_VECTOR);
+}
+
 /* Undocumented: icebp/int1 */
 static inline bool is_icebp(u32 intr_info)
 {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f890191e8580..dd3fde9d3c32 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -126,6 +126,9 @@  module_param(error_on_inconsistent_vmcs_config, bool, 0444);
 static bool __read_mostly dump_invalid_vmcs = 0;
 module_param(dump_invalid_vmcs, bool, 0644);
 
+static bool __read_mostly ept_violation_ve_test;
+module_param(ept_violation_ve_test, bool, 0444);
+
 #define MSR_BITMAP_MODE_X2APIC		1
 #define MSR_BITMAP_MODE_X2APIC_APICV	2
 
@@ -783,6 +786,13 @@  void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
 
 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
 	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
+	/*
+	 * #VE isn't used for VMX, only for TDX.  To detect unexpected
+	 * #VE-related breakage of the VMX case, intercept unexpected #VE
+	 * and warn on it.
+	 */
+	if (ept_violation_ve_test)
+		eb |= 1u << VE_VECTOR;
 	/*
 	 * Guest access to VMware backdoor ports could legitimately
 	 * trigger #GP because of TSS I/O permission bitmap.
@@ -2644,6 +2654,9 @@  static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 					&_cpu_based_2nd_exec_control))
 			return -EIO;
 	}
+	if (!ept_violation_ve_test)
+		_cpu_based_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
+
 #ifndef CONFIG_X86_64
 	if (!(_cpu_based_2nd_exec_control &
 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
@@ -2668,6 +2681,7 @@  static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 			return -EIO;
 
 		vmx_cap->ept = 0;
+		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
 	}
 	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
 	    vmx_cap->vpid) {
@@ -4510,6 +4524,7 @@  static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
 	if (!enable_ept) {
 		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+		exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE;
 		enable_unrestricted_guest = 0;
 	}
 	if (!enable_unrestricted_guest)
@@ -4637,8 +4652,40 @@  static void init_vmcs(struct vcpu_vmx *vmx)
 
 	exec_controls_set(vmx, vmx_exec_control(vmx));
 
-	if (cpu_has_secondary_exec_ctrls())
+	if (cpu_has_secondary_exec_ctrls()) {
 		secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
+		if (secondary_exec_controls_get(vmx) &
+		    SECONDARY_EXEC_EPT_VIOLATION_VE) {
+			if (!vmx->ve_info) {
+				/* ve_info must be page aligned. */
+				struct page *page;
+
+				BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
+				page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+				if (page)
+					vmx->ve_info = page_to_virt(page);
+			}
+			if (vmx->ve_info) {
+				/*
+				 * Allow #VE delivery. CPU sets this field to
+				 * 0xFFFFFFFF on #VE delivery.  Another #VE can
+				 * occur only if software clears the field.
+				 */
+				vmx->ve_info->delivery = 0;
+				vmcs_write64(VE_INFORMATION_ADDRESS,
+					     __pa(vmx->ve_info));
+			} else {
+				/*
+				 * Because SECONDARY_EXEC_EPT_VIOLATION_VE is
+				 * used only when ept_violation_ve_test is true,
+				 * it's okay to go with the bit disabled.
+				 */
+				pr_err("Failed to allocate ve_info; disabling EPT_VIOLATION_VE.\n");
+				secondary_exec_controls_clearbit(vmx,
+								 SECONDARY_EXEC_EPT_VIOLATION_VE);
+			}
+		}
+	}
 
 	if (cpu_has_tertiary_exec_ctrls())
 		tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
@@ -5118,6 +5165,12 @@  static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 	if (is_invalid_opcode(intr_info))
 		return handle_ud(vcpu);
 
+	/*
+	 * #VE isn't supposed to happen for VMX guests.  If one occurs
+	 * anyway, it's a KVM bug: warn and bail out of the guest with
+	 * an internal error.
+	 */
+	if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm))
+		return -EIO;
+
 	error_code = 0;
 	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
 		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
@@ -6306,6 +6359,18 @@  void dump_vmcs(struct kvm_vcpu *vcpu)
 	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
 		pr_err("Virtual processor ID = 0x%04x\n",
 		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
+	if (secondary_exec_control & SECONDARY_EXEC_EPT_VIOLATION_VE) {
+		struct vmx_ve_information *ve_info;
+
+		pr_err("VE info address = 0x%016llx\n",
+		       vmcs_read64(VE_INFORMATION_ADDRESS));
+		ve_info = __va(vmcs_read64(VE_INFORMATION_ADDRESS));
+		pr_err("ve_info: 0x%08x 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%04x\n",
+		       ve_info->exit_reason, ve_info->delivery,
+		       ve_info->exit_qualification,
+		       ve_info->guest_linear_address,
+		       ve_info->guest_physical_address, ve_info->eptp_index);
+	}
 }
 
 /*
@@ -7302,6 +7367,8 @@  void vmx_vcpu_free(struct kvm_vcpu *vcpu)
 	free_vpid(vmx->vpid);
 	nested_vmx_free_vcpu(vcpu);
 	free_loaded_vmcs(vmx->loaded_vmcs);
+	if (vmx->ve_info)
+		free_page((unsigned long)vmx->ve_info);
 }
 
 int vmx_vcpu_create(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index d49d0ace9fb8..1813caeb24d8 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -359,6 +359,9 @@  struct vcpu_vmx {
 		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
 		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
 	} shadow_msr_intercept;
+
+	/* ve_info must be page aligned. */
+	struct vmx_ve_information *ve_info;
 };
 
 struct kvm_vmx {
@@ -570,7 +573,8 @@  static inline u8 vmx_get_rvi(void)
 	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
 	 SECONDARY_EXEC_BUS_LOCK_DETECTION |				\
 	 SECONDARY_EXEC_NOTIFY_VM_EXITING |				\
-	 SECONDARY_EXEC_ENCLS_EXITING)
+	 SECONDARY_EXEC_ENCLS_EXITING |					\
+	 SECONDARY_EXEC_EPT_VIOLATION_VE)
 
 #define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
 #define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL			\
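
Usage sketch (assuming vmx.c is built into the usual kvm-intel module):
the parameter's 0444 permissions make it read-only at runtime, so the
test mode has to be enabled at module load time, e.g.:

	modprobe kvm_intel ept_violation_ve_test=1
	cat /sys/module/kvm_intel/parameters/ept_violation_ve_test

With the mode enabled, any #VE delivered to a plain VMX guest trips the
KVM_BUG_ON() in handle_exception_nmi() and terminates the VM with an
internal error.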