[3/6] x86/entry_32: Add VERW just before userspace transition

Message ID: 20231020-delay-verw-v1-3-cff54096326d@linux.intel.com
State: New
Series: Delay VERW

Commit Message

Pawan Gupta Oct. 20, 2023, 8:45 p.m. UTC
  As done for entry_64, add support for executing VERW late in exit to
user path for 32-bit mode.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
---
 arch/x86/entry/entry_32.S | 8 ++++++++
 1 file changed, 8 insertions(+)
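
Note: the USER_CLEAR_CPU_BUFFERS macro used in the hunks below is introduced
by an earlier patch in this series. As a rough sketch of the kind of construct
it is (the feature-flag and selector names here are assumptions, not the
series' exact definition), it expands to a VERW that is patched in via
alternatives only when the mitigation is enabled:

/* Sketch only -- the real macro comes from an earlier patch in this series. */
.macro USER_CLEAR_CPU_BUFFERS
	/*
	 * VERW with a memory operand has the architectural side effect of
	 * clearing CPU buffers on affected CPUs; remains a no-op otherwise.
	 */
	ALTERNATIVE "", "verw mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
.endm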
  

Comments

Andi Kleen Oct. 20, 2023, 11:49 p.m. UTC | #1
On Fri, Oct 20, 2023 at 01:45:09PM -0700, Pawan Gupta wrote:
> As done for entry_64, add support for executing VERW late in exit to
> user path for 32-bit mode.
> 
> Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
> ---
>  arch/x86/entry/entry_32.S | 8 ++++++++
>  1 file changed, 8 insertions(+)
> 
> diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
> index 6e6af42e044a..bbf77d2aab2e 100644
> --- a/arch/x86/entry/entry_32.S
> +++ b/arch/x86/entry/entry_32.S
> @@ -886,6 +886,9 @@ SYM_FUNC_START(entry_SYSENTER_32)
>  	popfl
>  	popl	%eax
>  
> +	/* Mitigate CPU data sampling attacks e.g. MDS */
> +	USER_CLEAR_CPU_BUFFERS
> +
>  	/*
>  	 * Return back to the vDSO, which will pop ecx and edx.
>  	 * Don't bother with DS and ES (they already contain __USER_DS).

Did you forget the INT 0x80 entry point?

-Andi
  
Pawan Gupta Oct. 21, 2023, 1:28 a.m. UTC | #2
On Fri, Oct 20, 2023 at 04:49:34PM -0700, Andi Kleen wrote:
> On Fri, Oct 20, 2023 at 01:45:09PM -0700, Pawan Gupta wrote:
> > As done for entry_64, add support for executing VERW late in exit to
> > user path for 32-bit mode.
> > 
> > Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
> > ---
> >  arch/x86/entry/entry_32.S | 8 ++++++++
> >  1 file changed, 8 insertions(+)
> > 
> > diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
> > index 6e6af42e044a..bbf77d2aab2e 100644
> > --- a/arch/x86/entry/entry_32.S
> > +++ b/arch/x86/entry/entry_32.S
> > @@ -886,6 +886,9 @@ SYM_FUNC_START(entry_SYSENTER_32)
> >  	popfl
> >  	popl	%eax
> >  
> > +	/* Mitigate CPU data sampling attacks e.g. MDS */
> > +	USER_CLEAR_CPU_BUFFERS
> > +
> >  	/*
> >  	 * Return back to the vDSO, which will pop ecx and edx.
> >  	 * Don't bother with DS and ES (they already contain __USER_DS).
> 
> Did you forget the INT 0x80 entry point?

I do have VERW in the INT80 path; the diff hunk context just shows the nearest
label, restore_all_switch_stack. Below is the sequence:

SYM_FUNC_START(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */

	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:
	STACKLEAK_ERASE

restore_all_switch_stack:
	SWITCH_TO_ENTRY_STACK
	CHECK_AND_APPLY_ESPFIX

	/* Switch back to user CR3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	BUG_IF_WRONG_CR3

	/* Restore user state */
	RESTORE_REGS pop=4			# skip orig_eax/error_code

	/* Mitigate CPU data sampling attacks e.g. MDS */
	USER_CLEAR_CPU_BUFFERS
	^^^^^^^^^^^^^^^^^^^^^^
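
The placement after RESTORE_REGS is the whole point of the "Delay VERW"
series: any kernel memory access executed after the VERW could repopulate the
buffers it just cleared, so the clearing is done after the last loads from the
kernel stack, immediately before the transition to userspace. A simplified
illustration (not actual entry code):

	/* Too early: the register restores below still load from the
	 * kernel stack and may refill the CPU buffers after VERW ran. */
	USER_CLEAR_CPU_BUFFERS
	RESTORE_REGS pop=4

	/* Late (this series): clear the buffers after the last kernel
	 * memory accesses, right before returning to user mode. */
	RESTORE_REGS pop=4
	USER_CLEAR_CPU_BUFFERS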
  

Patch

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 6e6af42e044a..bbf77d2aab2e 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -886,6 +886,9 @@ SYM_FUNC_START(entry_SYSENTER_32)
 	popfl
 	popl	%eax
 
+	/* Mitigate CPU data sampling attacks e.g. MDS */
+	USER_CLEAR_CPU_BUFFERS
+
 	/*
 	 * Return back to the vDSO, which will pop ecx and edx.
 	 * Don't bother with DS and ES (they already contain __USER_DS).
@@ -954,6 +957,9 @@ restore_all_switch_stack:
 
 	/* Restore user state */
 	RESTORE_REGS pop=4			# skip orig_eax/error_code
+
+	/* Mitigate CPU data sampling attacks e.g. MDS */
+	USER_CLEAR_CPU_BUFFERS
 .Lirq_return:
 	/*
 	 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -1146,6 +1152,8 @@ SYM_CODE_START(asm_exc_nmi)
 
 	/* Not on SYSENTER stack. */
 	call	exc_nmi
+	/* Mitigate CPU data sampling attacks e.g. MDS */
+	USER_CLEAR_CPU_BUFFERS
 	jmp	.Lnmi_return
 
 .Lnmi_from_sysenter_stack: