[v2,15/20] x86: head_64: Switch to kernel CS before enabling memory encryption

Message ID: 20230508070330.582131-16-ardb@kernel.org
State: New
Series: efi/x86: Avoid bare metal decompressor during EFI boot

Commit Message

Ard Biesheuvel May 8, 2023, 7:03 a.m. UTC
  The SME initialization triggers #VC exceptions due to the use of CPUID
instructions, and returning from an exception restores the code segment
that was active when the exception was taken.
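
For reference, a rough sketch of the stack frame the #VC handler's IRET consumes
(the standard 64-bit exception frame; #VC also pushes an error code, which is
discarded before the IRET):

	/* frame popped by iretq, top of stack first */
	RIP	<- resume address in the interrupted code
	CS	<- restored verbatim: the selector that was live when the
		   CPUID instruction trapped, not necessarily one described
		   in the GDT the kernel has just loaded
	RFLAGS
	RSP
	SS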

This means we should ensure that we switch the code segment to one that
is described in the GDT we just loaded before running the SME init code.
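
As an illustration, the CS switch itself is the usual far-return idiom (the same
instructions the diff below moves ahead of sme_enable, annotated here; CS cannot
be written with a plain MOV, so a far transfer is needed to reload it):

	pushq	$__KERNEL_CS			/* selector from the new GDT     */
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax				/* return address (new RIP)      */
	lretq					/* far return: pops RIP, then CS */

.Lon_kernel_cs: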

Reported-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/kernel/head_64.S | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
  

Comments

Tom Lendacky May 17, 2023, 6:54 p.m. UTC | #1
On 5/8/23 02:03, Ard Biesheuvel wrote:
> The SME initialization triggers #VC exceptions due to the use of CPUID
> instructions, and returning from an exception restores the code segment
> that was active when the exception was taken.
> 
> This means we should ensure that we switch the code segment to one that
> is described in the GDT we just loaded before running the SME init code.
> 
> Reported-by: Tom Lendacky <thomas.lendacky@amd.com>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

Ah, just saw this as I was going through my email backlog...  I submitted 
a separate patch just a little earlier today for this issue. I guess we'll 
let the maintainers decide how they want to handle it.

Thanks,
Tom

> ---
>   arch/x86/kernel/head_64.S | 18 +++++++++---------
>   1 file changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index 95b12fdae10e1dc9..a128ac62956ff7c4 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -76,6 +76,15 @@ SYM_CODE_START_NOALIGN(startup_64)
>   
>   	call	startup_64_setup_env
>   
> +	/* Now switch to __KERNEL_CS so IRET works reliably */
> +	pushq	$__KERNEL_CS
> +	leaq	.Lon_kernel_cs(%rip), %rax
> +	pushq	%rax
> +	lretq
> +
> +.Lon_kernel_cs:
> +	UNWIND_HINT_END_OF_STACK
> +
>   #ifdef CONFIG_AMD_MEM_ENCRYPT
>   	/*
>   	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
> @@ -87,15 +96,6 @@ SYM_CODE_START_NOALIGN(startup_64)
>   	call	sme_enable
>   #endif
>   
> -	/* Now switch to __KERNEL_CS so IRET works reliably */
> -	pushq	$__KERNEL_CS
> -	leaq	.Lon_kernel_cs(%rip), %rax
> -	pushq	%rax
> -	lretq
> -
> -.Lon_kernel_cs:
> -	UNWIND_HINT_END_OF_STACK
> -
>   	/* Sanitize CPU configuration */
>   	call verify_cpu
>
  

Patch

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 95b12fdae10e1dc9..a128ac62956ff7c4 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -76,6 +76,15 @@ SYM_CODE_START_NOALIGN(startup_64)
 
 	call	startup_64_setup_env
 
+	/* Now switch to __KERNEL_CS so IRET works reliably */
+	pushq	$__KERNEL_CS
+	leaq	.Lon_kernel_cs(%rip), %rax
+	pushq	%rax
+	lretq
+
+.Lon_kernel_cs:
+	UNWIND_HINT_END_OF_STACK
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	/*
 	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
@@ -87,15 +96,6 @@ SYM_CODE_START_NOALIGN(startup_64)
 	call	sme_enable
 #endif
 
-	/* Now switch to __KERNEL_CS so IRET works reliably */
-	pushq	$__KERNEL_CS
-	leaq	.Lon_kernel_cs(%rip), %rax
-	pushq	%rax
-	lretq
-
-.Lon_kernel_cs:
-	UNWIND_HINT_END_OF_STACK
-
 	/* Sanitize CPU configuration */
 	call verify_cpu