[v2,16/17] x86/sev: Drop inline asm LEA instructions for RIP-relative references

Message ID 20240125112818.2016733-35-ardb+git@google.com
State New
Series: x86: Confine early 1:1 mapped startup code

Commit Message

Ard Biesheuvel Jan. 25, 2024, 11:28 a.m. UTC
  From: Ard Biesheuvel <ardb@kernel.org>

The SEV code that may run early is now built with -fPIC, so there is
no longer a need for explicit RIP-relative references in inline asm,
given that this is what the compiler will emit as well.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/mm/mem_encrypt_identity.c | 37 +++-----------------
 1 file changed, 5 insertions(+), 32 deletions(-)
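
The idiom being removed forces a RIP-relative LEA by hand so the
reference also works while running from the identity mapping, before
relocation fixups are applied; once the object is built with -fPIC, a
plain C reference lowers to the same instruction. A minimal sketch of
the two forms (illustrative, condensed from the hunks below):

	/* Before: hand-rolled RIP-relative reference via inline asm. */
	unsigned long workarea_start;

	asm ("lea sme_workarea(%%rip), %0"
	     : "=r" (workarea_start)
	     : "p" (sme_workarea));

	/*
	 * After: with -fPIC the compiler itself emits
	 * "lea sme_workarea(%rip), %reg" for the plain reference.
	 */
	unsigned long workarea_start = (unsigned long)sme_workarea;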
  

Comments

Kevin Loughlin Jan. 25, 2024, 8:46 p.m. UTC | #1
On Thu, Jan 25, 2024 at 3:33 AM Ard Biesheuvel <ardb+git@google.com> wrote:
>
> The SEV code that may run early is now built with -fPIC, so there is
> no longer a need for explicit RIP-relative references in inline asm,
> given that this is what the compiler will emit as well.
>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
>  arch/x86/mm/mem_encrypt_identity.c | 37 +++-----------------
>  1 file changed, 5 insertions(+), 32 deletions(-)

snp_cpuid_get_table() in arch/x86/kernel/sev-shared.c (a helper
function to provide the same inline assembly pattern for RIP-relative
references) would also no longer be needed, as all calls to it would
now be made in position-independent code. We can therefore eliminate
the function as part of this commit.
  
Ard Biesheuvel Jan. 25, 2024, 11:24 p.m. UTC | #2
On Thu, 25 Jan 2024 at 21:46, Kevin Loughlin <kevinloughlin@google.com> wrote:
>
> On Thu, Jan 25, 2024 at 3:33 AM Ard Biesheuvel <ardb+git@google.com> wrote:
> >
> > The SEV code that may run early is now built with -fPIC, so there is
> > no longer a need for explicit RIP-relative references in inline asm,
> > given that this is what the compiler will emit as well.
> >
> > Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> > ---
> >  arch/x86/mm/mem_encrypt_identity.c | 37 +++-----------------
> >  1 file changed, 5 insertions(+), 32 deletions(-)
>
> snp_cpuid_get_table() in arch/x86/kernel/sev-shared.c (a helper
> function to provide the same inline assembly pattern for RIP-relative
> references) would also no longer be needed, as all calls to it would
> now be made in position-independent code. We can therefore eliminate
> the function as part of this commit.

Yes that would be another nice cleanup.
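
For reference, the helper Kevin points at wraps the same inline asm
pattern, so the follow-up cleanup is mechanical. A sketch of the shape
involved (paraphrased; cpuid_table_copy stands in for the actual table
symbol rather than being taken from the tree):

	static const struct snp_cpuid_table *snp_cpuid_get_table(void)
	{
		void *ptr;

		/*
		 * Same hand-rolled RIP-relative LEA as in
		 * mem_encrypt_identity.c; cpuid_table_copy is a
		 * stand-in name for the table symbol.
		 */
		asm ("lea cpuid_table_copy(%%rip), %0"
		     : "=r" (ptr)
		     : "p" (&cpuid_table_copy));

		return ptr;
	}

With every caller built as position-independent code, each call site
can take the table's address directly and the helper can be dropped.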
  

Patch

diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 20b23da4a26d..2d857e3a560a 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -86,10 +86,6 @@  struct sme_populate_pgd_data {
  */
 static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
 
-static char sme_cmdline_arg[] __initdata = "mem_encrypt";
-static char sme_cmdline_on[]  __initdata = "on";
-static char sme_cmdline_off[] __initdata = "off";
-
 static void __pitext sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 {
 	unsigned long pgd_start, pgd_end, pgd_size;
@@ -333,14 +329,6 @@  void __pitext sme_encrypt_kernel(struct boot_params *bp)
 	}
 #endif
 
-	/*
-	 * We're running identity mapped, so we must obtain the address to the
-	 * SME encryption workarea using rip-relative addressing.
-	 */
-	asm ("lea sme_workarea(%%rip), %0"
-	     : "=r" (workarea_start)
-	     : "p" (sme_workarea));
-
 	/*
 	 * Calculate required number of workarea bytes needed:
 	 *   executable encryption area size:
@@ -350,7 +338,7 @@  void __pitext sme_encrypt_kernel(struct boot_params *bp)
 	 *   pagetable structures for the encryption of the kernel
 	 *   pagetable structures for workarea (in case not currently mapped)
 	 */
-	execute_start = workarea_start;
+	execute_start = workarea_start = (unsigned long)sme_workarea;
 	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
 	execute_len = execute_end - execute_start;
 
@@ -517,9 +505,9 @@  static int __pitext __strncmp(const char *cs, const char *ct, size_t count)
 
 void __pitext sme_enable(struct boot_params *bp)
 {
-	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
 	unsigned int eax, ebx, ecx, edx;
 	unsigned long feature_mask;
+	const char *cmdline_ptr;
 	bool active_by_default;
 	unsigned long me_mask;
 	char buffer[16];
@@ -590,21 +578,6 @@  void __pitext sme_enable(struct boot_params *bp)
 		goto out;
 	}
 
-	/*
-	 * Fixups have not been applied to phys_base yet and we're running
-	 * identity mapped, so we must obtain the address to the SME command
-	 * line argument data using rip-relative addressing.
-	 */
-	asm ("lea sme_cmdline_arg(%%rip), %0"
-	     : "=r" (cmdline_arg)
-	     : "p" (sme_cmdline_arg));
-	asm ("lea sme_cmdline_on(%%rip), %0"
-	     : "=r" (cmdline_on)
-	     : "p" (sme_cmdline_on));
-	asm ("lea sme_cmdline_off(%%rip), %0"
-	     : "=r" (cmdline_off)
-	     : "p" (sme_cmdline_off));
-
 	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
 		active_by_default = true;
 	else
@@ -613,12 +586,12 @@  void __pitext sme_enable(struct boot_params *bp)
 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
 				     ((u64)bp->ext_cmd_line_ptr << 32));
 
-	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+	if (cmdline_find_option(cmdline_ptr, "mem_encrypt", buffer, sizeof(buffer)) < 0)
 		return;
 
-	if (!__strncmp(buffer, cmdline_on, sizeof(buffer)))
+	if (!__strncmp(buffer, "on", sizeof(buffer)))
 		sme_me_mask = me_mask;
-	else if (!__strncmp(buffer, cmdline_off, sizeof(buffer)))
+	else if (!__strncmp(buffer, "off", sizeof(buffer)))
 		sme_me_mask = 0;
 	else
 		sme_me_mask = active_by_default ? me_mask : 0;
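
The equivalence is easy to verify outside the kernel: compile a
reference like the ones above with -fPIC and the compiler emits the
very LEA the removed inline asm spelled out by hand. A hypothetical
standalone check (file name and output are illustrative, not taken
from the series):

	$ cat pic-ref.c
	static char sme_workarea[64];

	unsigned long workarea_start(void)
	{
		return (unsigned long)sme_workarea;
	}
	$ gcc -O2 -fPIC -S -o - pic-ref.c | grep lea
		leaq	sme_workarea(%rip), %rax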