[4/6] KVM: x86: LASS protection on KVM emulation when LASS enabled

Message ID 20230420133724.11398-5-guang.zeng@intel.com
State New
Series LASS KVM virtualization support

Commit Message

Zeng Guang April 20, 2023, 1:37 p.m. UTC
Do LASS violation check for instructions emulated by KVM. Note that for
instructions executed directly in the guest, hardware will perform the
check.

Not all instruction emulation leads to accesses to guest linear addresses,
because 1) some instructions, like CPUID and RDMSR, don't take memory as
operands, and 2) instruction fetch in most cases is already done inside the
guest.

Four cases in which KVM may access guest linear addresses are identified
by code inspection:
- KVM emulator uses segmented addresses for instruction fetches or data
  accesses.
- For implicit data accesses, KVM emulator gets the address pointing to a
  system data structure (GDT/LDT/IDT/TR).
- For VMX instruction emulation, KVM gets the address from the "VM-exit
  instruction information" field in the VMCS.
- For SGX ENCLS instruction emulation, KVM gets the address from registers.

The LASS violation check applies to these linear addresses so as to enforce
mode-based protections just as hardware does.

As exceptions, the target memory addresses of the emulation of invlpg, branch
and call instructions don't require the LASS violation check.

Signed-off-by: Zeng Guang <guang.zeng@intel.com>
---
 arch/x86/kvm/emulate.c    | 36 +++++++++++++++++++++++++++++++-----
 arch/x86/kvm/vmx/nested.c |  3 +++
 arch/x86/kvm/vmx/sgx.c    |  2 ++
 3 files changed, 36 insertions(+), 5 deletions(-)
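
[Editor's note: for background, a minimal sketch of the mode-based rule LASS enforces in 64-bit mode. The helper name lass_violation() is invented here for illustration; the series' actual helper, __vmx_check_lass(), also has to handle wrinkles such as SMAP-style implicit supervisor accesses.]

static bool lass_violation(u64 linear_addr, bool supervisor_access)
{
	/*
	 * Bit 63 of a 64-bit linear address selects the half of the
	 * address space: 1 = supervisor half, 0 = user half.
	 */
	bool user_half = !(linear_addr & BIT_ULL(63));

	/*
	 * LASS blocks cross-half accesses before any paging-structure
	 * walk: a supervisor access to the user half and a user access
	 * to the supervisor half are both violations.
	 */
	return supervisor_access ? user_half : !user_half;
}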
  

Comments

Binbin Wu April 25, 2023, 2:52 a.m. UTC | #1
On 4/20/2023 9:37 PM, Zeng Guang wrote:
> Do LASS violation check for instructions emulated by KVM. Note that for
> instructions executed in the guest directly, hardware will perform the
> check.
>
> Not all instruction emulation leads to accesses to guest linear addresses
> because 1) some instrutions like CPUID, RDMSR, don't take memory as

/s/instrutions/instructions
> operands 2) instruction fetch in most cases is already done inside the
> guest.
What are the instruction fetch cases not covered in non-root mode?
And IIUC, the patch actually doesn't distinguish them and always checks
LASS violation for instruction fetch in instruction emulation, right?

>
> Four cases in which kvm may access guest linear addresses are identified
> by code inspection:
> - KVM emulator uses segmented address for instruction fetches or data
>    accesses.
> - For implicit data access, KVM emulator gets address to a system data
to or from?

>    structure(GDT/LDT/IDT/TR).
> - For VMX instruction emulation, KVM gets the address from "VM-exit
>    instruction information" field in VMCS.
> - For SGX ENCLS instruction emulation, KVM gets the address from registers.
>
> LASS violation check applies to these linear address so as to enforce
address -> addresses

> mode-based protections as hardware behaves.
>
> As exceptions, the target memory address of emulation of invlpg, branch
> and call instructions doesn't require LASS violation check.
>
> Signed-off-by: Zeng Guang <guang.zeng@intel.com>
> ---
>   arch/x86/kvm/emulate.c    | 36 +++++++++++++++++++++++++++++++-----
>   arch/x86/kvm/vmx/nested.c |  3 +++
>   arch/x86/kvm/vmx/sgx.c    |  2 ++
>   3 files changed, 36 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index 5cc3efa0e21c..a9a022fd712e 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -687,7 +687,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>   				       struct segmented_address addr,
>   				       unsigned *max_size, unsigned size,
>   				       bool write, bool fetch,
> -				       enum x86emul_mode mode, ulong *linear)
> +				       enum x86emul_mode mode, ulong *linear,
> +				       u64 flags)
>   {
>   	struct desc_struct desc;
>   	bool usable;
> @@ -695,6 +696,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>   	u32 lim;
>   	u16 sel;
>   	u8  va_bits;
> +	u64 access = fetch ? PFERR_FETCH_MASK : 0;
>   
>   	la = seg_base(ctxt, addr.seg) + addr.ea;
>   	*max_size = 0;
> @@ -740,6 +742,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>   		}
>   		break;
>   	}
> +
> +	if (ctxt->ops->check_lass(ctxt, access, *linear, flags))
> +		goto bad;
> +
>   	if (la & (insn_alignment(ctxt, size) - 1))
>   		return emulate_gp(ctxt, 0);
>   	return X86EMUL_CONTINUE;
> @@ -757,7 +763,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
>   {
>   	unsigned max_size;
>   	return __linearize(ctxt, addr, &max_size, size, write, false,
> -			   ctxt->mode, linear);
> +			   ctxt->mode, linear, 0);
>   }
>   
>   static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
> @@ -770,7 +776,10 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
>   
>   	if (ctxt->op_bytes != sizeof(unsigned long))
>   		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
> -	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
> +
> +	/* LASS doesn't apply to address for branch and call instructions */
> +	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
> +	     &linear, KVM_X86_EMULFLAG_SKIP_LASS);
>   	if (rc == X86EMUL_CONTINUE)
>   		ctxt->_eip = addr.ea;
>   	return rc;
> @@ -845,6 +854,13 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
>   static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
>   			      void *data, unsigned size)
>   {
> +	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
> +		ctxt->exception.vector = GP_VECTOR;
> +		ctxt->exception.error_code = 0;
> +		ctxt->exception.error_code_valid = true;
> +		return X86EMUL_PROPAGATE_FAULT;
> +	}
> +
>   	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
>   }
>   
> @@ -852,6 +868,13 @@ static int linear_write_system(struct x86_emulate_ctxt *ctxt,
>   			       ulong linear, void *data,
>   			       unsigned int size)
>   {
> +	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
> +		ctxt->exception.vector = GP_VECTOR;
> +		ctxt->exception.error_code = 0;
> +		ctxt->exception.error_code_valid = true;
> +		return X86EMUL_PROPAGATE_FAULT;
> +	}
> +
>   	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
>   }
>   
> @@ -907,7 +930,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
>   	 * against op_size.
>   	 */
>   	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
> -			 &linear);
> +			 &linear, 0);
>   	if (unlikely(rc != X86EMUL_CONTINUE))
>   		return rc;
>   
> @@ -3432,8 +3455,11 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
>   {
>   	int rc;
>   	ulong linear;
> +	unsigned max_size;
>   
> -	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
> +	/* LASS doesn't apply to the memory address for invlpg */
> +	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, false, false,
> +	     ctxt->mode, &linear, KVM_X86_EMULFLAG_SKIP_LASS);
>   	if (rc == X86EMUL_CONTINUE)
>   		ctxt->ops->invlpg(ctxt, linear);
>   	/* Disable writeback. */
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index c8ae9d0e59b3..55c88c4593a6 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -4974,6 +4974,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
>   		 * destination for long mode!
>   		 */
>   		exn = is_noncanonical_address(*ret, vcpu);
> +
> +		if (!exn)
> +			exn = __vmx_check_lass(vcpu, 0, *ret, 0);
>   	} else {
>   		/*
>   		 * When not in long mode, the virtual/linear address is
> diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
> index b12da2a6dec9..30cb5d0980be 100644
> --- a/arch/x86/kvm/vmx/sgx.c
> +++ b/arch/x86/kvm/vmx/sgx.c
> @@ -37,6 +37,8 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
>   		fault = true;
>   	} else if (likely(is_long_mode(vcpu))) {
>   		fault = is_noncanonical_address(*gva, vcpu);
> +		if (!fault)
> +			fault = __vmx_check_lass(vcpu, 0, *gva, 0);
>   	} else {
>   		*gva &= 0xffffffff;
>   		fault = (s.unusable) ||
  
Zeng Guang April 25, 2023, 6:40 a.m. UTC | #2
On 4/25/2023 10:52 AM, Binbin Wu wrote:
>
> On 4/20/2023 9:37 PM, Zeng Guang wrote:
>> Do LASS violation check for instructions emulated by KVM. Note that for
>> instructions executed in the guest directly, hardware will perform the
>> check.
>>
>> Not all instruction emulation leads to accesses to guest linear addresses
>> because 1) some instrutions like CPUID, RDMSR, don't take memory as
> /s/instrutions/instructions
Oops. :P
>> operands 2) instruction fetch in most cases is already done inside the
>> guest.
> What are the instruction fetch cases not covered in non-root mode?
> And IIUC, the patch actually doesn't distinguish them and always checks
> LASS violation for instruction fetch in instruction emulation, right?

This states that most instructions needn't be fetched by KVM: KVM intercepts
most of the privileged instructions and completes the function emulation
directly. But some instructions require KVM to fetch the code and emulate
further, e.g. lgdt/sgdt. KVM will always do the LASS violation check on
instruction fetch once it happens.
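
[Editor's note: a simplified sketch of that fetch path, not a verbatim excerpt from the patch; the wrapper name fetch_linearize() is invented for illustration. Passing fetch=true makes the patched __linearize() compute access = PFERR_FETCH_MASK, which is what ctxt->ops->check_lass() then sees.]

static int fetch_linearize(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr, ulong *linear)
{
	unsigned max_size;

	/* fetch=true, so check_lass() runs with access = PFERR_FETCH_MASK */
	return __linearize(ctxt, addr, &max_size, 0, false, true,
			   ctxt->mode, linear, 0);
}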

>> Four cases in which kvm may access guest linear addresses are identified
>> by code inspection:
>> - KVM emulator uses segmented address for instruction fetches or data
>>     accesses.
>> - For implicit data access, KVM emulator gets address to a system data
> to or from?

It means the address pointing *to* a system data structure.
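
[Editor's note: an illustrative example of such an implicit access, assuming the made-up helper name read_gdt_descriptor_example(); it mirrors how the emulator reads a segment descriptor via linear_read_system(), which under this patch performs the LASS check with PFERR_IMPLICIT_ACCESS.]

static int read_gdt_descriptor_example(struct x86_emulate_ctxt *ctxt,
				       u16 selector, struct desc_struct *desc)
{
	struct desc_ptr gdt;

	ctxt->ops->get_gdt(ctxt, &gdt);	/* base/limit of the guest GDT */

	/*
	 * An implicit system-structure access: the guest never named
	 * this linear address in an operand, so the LASS check runs
	 * with PFERR_IMPLICIT_ACCESS before ->read_std() is called.
	 */
	return linear_read_system(ctxt, gdt.address + (selector & ~0x7),
				  desc, sizeof(*desc));
}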

>>     structure(GDT/LDT/IDT/TR).
>> - For VMX instruction emulation, KVM gets the address from "VM-exit
>>     instruction information" field in VMCS.
>> - For SGX ENCLS instruction emulation, KVM gets the address from registers.
>>
>> LASS violation check applies to these linear address so as to enforce
> address -> addresses
OK.
>
>> mode-based protections as hardware behaves.
>>
>> As exceptions, the target memory address of emulation of invlpg, branch
>> and call instructions doesn't require LASS violation check.
>>
>> Signed-off-by: Zeng Guang <guang.zeng@intel.com>
>> ---
>>    arch/x86/kvm/emulate.c    | 36 +++++++++++++++++++++++++++++++-----
>>    arch/x86/kvm/vmx/nested.c |  3 +++
>>    arch/x86/kvm/vmx/sgx.c    |  2 ++
>>    3 files changed, 36 insertions(+), 5 deletions(-)
>>
>> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
>> index 5cc3efa0e21c..a9a022fd712e 100644
>> --- a/arch/x86/kvm/emulate.c
>> +++ b/arch/x86/kvm/emulate.c
>> @@ -687,7 +687,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>>    				       struct segmented_address addr,
>>    				       unsigned *max_size, unsigned size,
>>    				       bool write, bool fetch,
>> -				       enum x86emul_mode mode, ulong *linear)
>> +				       enum x86emul_mode mode, ulong *linear,
>> +				       u64 flags)
>>    {
>>    	struct desc_struct desc;
>>    	bool usable;
>> @@ -695,6 +696,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>>    	u32 lim;
>>    	u16 sel;
>>    	u8  va_bits;
>> +	u64 access = fetch ? PFERR_FETCH_MASK : 0;
>>    
>>    	la = seg_base(ctxt, addr.seg) + addr.ea;
>>    	*max_size = 0;
>> @@ -740,6 +742,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>>    		}
>>    		break;
>>    	}
>> +
>> +	if (ctxt->ops->check_lass(ctxt, access, *linear, flags))
>> +		goto bad;
>> +
>>    	if (la & (insn_alignment(ctxt, size) - 1))
>>    		return emulate_gp(ctxt, 0);
>>    	return X86EMUL_CONTINUE;
>> @@ -757,7 +763,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
>>    {
>>    	unsigned max_size;
>>    	return __linearize(ctxt, addr, &max_size, size, write, false,
>> -			   ctxt->mode, linear);
>> +			   ctxt->mode, linear, 0);
>>    }
>>    
>>    static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
>> @@ -770,7 +776,10 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
>>    
>>    	if (ctxt->op_bytes != sizeof(unsigned long))
>>    		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
>> -	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
>> +
>> +	/* LASS doesn't apply to address for branch and call instructions */
>> +	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
>> +	     &linear, KVM_X86_EMULFLAG_SKIP_LASS);
>>    	if (rc == X86EMUL_CONTINUE)
>>    		ctxt->_eip = addr.ea;
>>    	return rc;
>> @@ -845,6 +854,13 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
>>    static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
>>    			      void *data, unsigned size)
>>    {
>> +	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
>> +		ctxt->exception.vector = GP_VECTOR;
>> +		ctxt->exception.error_code = 0;
>> +		ctxt->exception.error_code_valid = true;
>> +		return X86EMUL_PROPAGATE_FAULT;
>> +	}
>> +
>>    	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
>>    }
>>    
>> @@ -852,6 +868,13 @@ static int linear_write_system(struct x86_emulate_ctxt *ctxt,
>>    			       ulong linear, void *data,
>>    			       unsigned int size)
>>    {
>> +	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
>> +		ctxt->exception.vector = GP_VECTOR;
>> +		ctxt->exception.error_code = 0;
>> +		ctxt->exception.error_code_valid = true;
>> +		return X86EMUL_PROPAGATE_FAULT;
>> +	}
>> +
>>    	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
>>    }
>>    
>> @@ -907,7 +930,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
>>    	 * against op_size.
>>    	 */
>>    	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
>> -			 &linear);
>> +			 &linear, 0);
>>    	if (unlikely(rc != X86EMUL_CONTINUE))
>>    		return rc;
>>    
>> @@ -3432,8 +3455,11 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
>>    {
>>    	int rc;
>>    	ulong linear;
>> +	unsigned max_size;
>>    
>> -	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
>> +	/* LASS doesn't apply to the memory address for invlpg */
>> +	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, false, false,
>> +	     ctxt->mode, &linear, KVM_X86_EMULFLAG_SKIP_LASS);
>>    	if (rc == X86EMUL_CONTINUE)
>>    		ctxt->ops->invlpg(ctxt, linear);
>>    	/* Disable writeback. */
>> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
>> index c8ae9d0e59b3..55c88c4593a6 100644
>> --- a/arch/x86/kvm/vmx/nested.c
>> +++ b/arch/x86/kvm/vmx/nested.c
>> @@ -4974,6 +4974,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
>>    		 * destination for long mode!
>>    		 */
>>    		exn = is_noncanonical_address(*ret, vcpu);
>> +
>> +		if (!exn)
>> +			exn = __vmx_check_lass(vcpu, 0, *ret, 0);
>>    	} else {
>>    		/*
>>    		 * When not in long mode, the virtual/linear address is
>> diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
>> index b12da2a6dec9..30cb5d0980be 100644
>> --- a/arch/x86/kvm/vmx/sgx.c
>> +++ b/arch/x86/kvm/vmx/sgx.c
>> @@ -37,6 +37,8 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
>>    		fault = true;
>>    	} else if (likely(is_long_mode(vcpu))) {
>>    		fault = is_noncanonical_address(*gva, vcpu);
>> +		if (!fault)
>> +			fault = __vmx_check_lass(vcpu, 0, *gva, 0);
>>    	} else {
>>    		*gva &= 0xffffffff;
>>    		fault = (s.unusable) ||
  
Yuan Yao April 26, 2023, 1:31 a.m. UTC | #3
On Thu, Apr 20, 2023 at 09:37:22PM +0800, Zeng Guang wrote:
> Do LASS violation check for instructions emulated by KVM. Note that for
> instructions executed in the guest directly, hardware will perform the
> check.
>
> Not all instruction emulation leads to accesses to guest linear addresses
> because 1) some instrutions like CPUID, RDMSR, don't take memory as
> operands 2) instruction fetch in most cases is already done inside the
> guest.
>
> Four cases in which kvm may access guest linear addresses are identified
> by code inspection:
> - KVM emulator uses segmented address for instruction fetches or data
>   accesses.
> - For implicit data access, KVM emulator gets address to a system data
>   structure(GDT/LDT/IDT/TR).
> - For VMX instruction emulation, KVM gets the address from "VM-exit
>   instruction information" field in VMCS.
> - For SGX ENCLS instruction emulation, KVM gets the address from registers.
>
> LASS violation check applies to these linear address so as to enforce
> mode-based protections as hardware behaves.
>
> As exceptions, the target memory address of emulation of invlpg, branch
> and call instructions doesn't require LASS violation check.
>
> Signed-off-by: Zeng Guang <guang.zeng@intel.com>
> ---
>  arch/x86/kvm/emulate.c    | 36 +++++++++++++++++++++++++++++++-----
>  arch/x86/kvm/vmx/nested.c |  3 +++
>  arch/x86/kvm/vmx/sgx.c    |  2 ++
>  3 files changed, 36 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index 5cc3efa0e21c..a9a022fd712e 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -687,7 +687,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>  				       struct segmented_address addr,
>  				       unsigned *max_size, unsigned size,
>  				       bool write, bool fetch,
> -				       enum x86emul_mode mode, ulong *linear)
> +				       enum x86emul_mode mode, ulong *linear,
> +				       u64 flags)
>  {
>  	struct desc_struct desc;
>  	bool usable;
> @@ -695,6 +696,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>  	u32 lim;
>  	u16 sel;
>  	u8  va_bits;
> +	u64 access = fetch ? PFERR_FETCH_MASK : 0;
>
>  	la = seg_base(ctxt, addr.seg) + addr.ea;
>  	*max_size = 0;
> @@ -740,6 +742,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
>  		}
>  		break;
>  	}
> +
> +	if (ctxt->ops->check_lass(ctxt, access, *linear, flags))
> +		goto bad;
> +
>  	if (la & (insn_alignment(ctxt, size) - 1))
>  		return emulate_gp(ctxt, 0);
>  	return X86EMUL_CONTINUE;
> @@ -757,7 +763,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
>  {
>  	unsigned max_size;
>  	return __linearize(ctxt, addr, &max_size, size, write, false,
> -			   ctxt->mode, linear);
> +			   ctxt->mode, linear, 0);
>  }
>
>  static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
> @@ -770,7 +776,10 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
>
>  	if (ctxt->op_bytes != sizeof(unsigned long))
>  		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
> -	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
> +
> +	/* LASS doesn't apply to address for branch and call instructions */
> +	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
> +	     &linear, KVM_X86_EMULFLAG_SKIP_LASS);

The emulator code (emulate.c) is the common part of x86, so maybe a more
common abstraction like permission_check_before_paging would be better?
Let's also wait for other people's input on this.
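
[Editor's note: a hedged sketch of the kind of vendor-neutral hook being suggested; the member name and placement below are hypothetical, modeled on the posted check_lass() signature rather than anything in this series.]

struct x86_emulate_ops {
	/* ... existing callbacks ... */

	/*
	 * Hypothetical vendor-neutral pre-paging permission check. VMX
	 * would back it with the LASS check; other vendors could supply
	 * their own rules or leave it NULL to skip the check.
	 */
	bool (*permission_check_before_paging)(struct x86_emulate_ctxt *ctxt,
					       u64 access, ulong linear,
					       u64 flags);
};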

>  	if (rc == X86EMUL_CONTINUE)
>  		ctxt->_eip = addr.ea;
>  	return rc;
> @@ -845,6 +854,13 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
>  static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
>  			      void *data, unsigned size)
>  {
> +	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
> +		ctxt->exception.vector = GP_VECTOR;
> +		ctxt->exception.error_code = 0;
> +		ctxt->exception.error_code_valid = true;
> +		return X86EMUL_PROPAGATE_FAULT;
> +	}
> +
>  	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
>  }
>
> @@ -852,6 +868,13 @@ static int linear_write_system(struct x86_emulate_ctxt *ctxt,
>  			       ulong linear, void *data,
>  			       unsigned int size)
>  {
> +	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
> +		ctxt->exception.vector = GP_VECTOR;
> +		ctxt->exception.error_code = 0;
> +		ctxt->exception.error_code_valid = true;
> +		return X86EMUL_PROPAGATE_FAULT;
> +	}
> +
>  	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
>  }
>
> @@ -907,7 +930,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
>  	 * against op_size.
>  	 */
>  	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
> -			 &linear);
> +			 &linear, 0);
>  	if (unlikely(rc != X86EMUL_CONTINUE))
>  		return rc;
>
> @@ -3432,8 +3455,11 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
>  {
>  	int rc;
>  	ulong linear;
> +	unsigned max_size;
>
> -	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
> +	/* LASS doesn't apply to the memory address for invlpg */
> +	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, false, false,
> +	     ctxt->mode, &linear, KVM_X86_EMULFLAG_SKIP_LASS);
>  	if (rc == X86EMUL_CONTINUE)
>  		ctxt->ops->invlpg(ctxt, linear);
>  	/* Disable writeback. */
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index c8ae9d0e59b3..55c88c4593a6 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -4974,6 +4974,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
>  		 * destination for long mode!
>  		 */
>  		exn = is_noncanonical_address(*ret, vcpu);
> +
> +		if (!exn)
> +			exn = __vmx_check_lass(vcpu, 0, *ret, 0);
>  	} else {
>  		/*
>  		 * When not in long mode, the virtual/linear address is
> diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
> index b12da2a6dec9..30cb5d0980be 100644
> --- a/arch/x86/kvm/vmx/sgx.c
> +++ b/arch/x86/kvm/vmx/sgx.c
> @@ -37,6 +37,8 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
>  		fault = true;
>  	} else if (likely(is_long_mode(vcpu))) {
>  		fault = is_noncanonical_address(*gva, vcpu);
> +		if (!fault)
> +			fault = __vmx_check_lass(vcpu, 0, *gva, 0);
>  	} else {
>  		*gva &= 0xffffffff;
>  		fault = (s.unusable) ||
> --
> 2.27.0
>
  

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5cc3efa0e21c..a9a022fd712e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -687,7 +687,8 @@  static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 				       struct segmented_address addr,
 				       unsigned *max_size, unsigned size,
 				       bool write, bool fetch,
-				       enum x86emul_mode mode, ulong *linear)
+				       enum x86emul_mode mode, ulong *linear,
+				       u64 flags)
 {
 	struct desc_struct desc;
 	bool usable;
@@ -695,6 +696,7 @@  static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	u32 lim;
 	u16 sel;
 	u8  va_bits;
+	u64 access = fetch ? PFERR_FETCH_MASK : 0;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
 	*max_size = 0;
@@ -740,6 +742,10 @@  static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 		}
 		break;
 	}
+
+	if (ctxt->ops->check_lass(ctxt, access, *linear, flags))
+		goto bad;
+
 	if (la & (insn_alignment(ctxt, size) - 1))
 		return emulate_gp(ctxt, 0);
 	return X86EMUL_CONTINUE;
@@ -757,7 +763,7 @@  static int linearize(struct x86_emulate_ctxt *ctxt,
 {
 	unsigned max_size;
 	return __linearize(ctxt, addr, &max_size, size, write, false,
-			   ctxt->mode, linear);
+			   ctxt->mode, linear, 0);
 }
 
 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@@ -770,7 +776,10 @@  static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 
 	if (ctxt->op_bytes != sizeof(unsigned long))
 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+
+	/* LASS doesn't apply to address for branch and call instructions */
+	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode,
+	     &linear, KVM_X86_EMULFLAG_SKIP_LASS);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->_eip = addr.ea;
 	return rc;
@@ -845,6 +854,13 @@  static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
 			      void *data, unsigned size)
 {
+	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
+		ctxt->exception.vector = GP_VECTOR;
+		ctxt->exception.error_code = 0;
+		ctxt->exception.error_code_valid = true;
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
 }
 
@@ -852,6 +868,13 @@  static int linear_write_system(struct x86_emulate_ctxt *ctxt,
 			       ulong linear, void *data,
 			       unsigned int size)
 {
+	if (ctxt->ops->check_lass(ctxt, PFERR_IMPLICIT_ACCESS, linear, 0)) {
+		ctxt->exception.vector = GP_VECTOR;
+		ctxt->exception.error_code = 0;
+		ctxt->exception.error_code_valid = true;
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
 	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
 }
 
@@ -907,7 +930,7 @@  static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * against op_size.
 	 */
 	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
-			 &linear);
+			 &linear, 0);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 
@@ -3432,8 +3455,11 @@  static int em_invlpg(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
 	ulong linear;
+	unsigned max_size;
 
-	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
+	/* LASS doesn't apply to the memory address for invlpg */
+	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, false, false,
+	     ctxt->mode, &linear, KVM_X86_EMULFLAG_SKIP_LASS);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->ops->invlpg(ctxt, linear);
 	/* Disable writeback. */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c8ae9d0e59b3..55c88c4593a6 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4974,6 +4974,9 @@  int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
 		 * destination for long mode!
 		 */
 		exn = is_noncanonical_address(*ret, vcpu);
+
+		if (!exn)
+			exn = __vmx_check_lass(vcpu, 0, *ret, 0);
 	} else {
 		/*
 		 * When not in long mode, the virtual/linear address is
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index b12da2a6dec9..30cb5d0980be 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -37,6 +37,8 @@  static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
 		fault = true;
 	} else if (likely(is_long_mode(vcpu))) {
 		fault = is_noncanonical_address(*gva, vcpu);
+		if (!fault)
+			fault = __vmx_check_lass(vcpu, 0, *gva, 0);
 	} else {
 		*gva &= 0xffffffff;
 		fault = (s.unusable) ||