[RFC,5/5] x86: Build the core kernel with position independent codegen

Message ID: 20240122090851.851120-12-ardb+git@google.com
State: New
Series: x86: Build the core kernel using PIC codegen

Commit Message

Ard Biesheuvel Jan. 22, 2024, 9:08 a.m. UTC
  From: Ard Biesheuvel <ardb@kernel.org>

Pass the -fpie flag to the compiler when building objects that are
intended for the core kernel. This ensures that all implicit symbol
references are emitted using RIP-relative relocations, allowing the code
to execute correctly even if it runs from a different virtual address
than the one it was linked, loaded, or relocated to run at.
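
As an illustration of what this changes at the codegen level (a minimal
sketch, not taken from this series; the file and symbol names are made up,
and the assembly shown is what GCC typically emits on x86_64):

  /* pie_demo.c - hypothetical example, not kernel code */
  static unsigned long counter;

  unsigned long *counter_addr(void)
  {
          /*
           * Built with -mcmodel=kernel -fno-pic (the previous defaults),
           * taking the address typically becomes an absolute reference:
           *
           *      movq    $counter, %rax          # R_X86_64_32S
           *
           * Built with PIE_CFLAGS (-fpie -mcmodel=small), it becomes a
           * RIP-relative reference instead:
           *
           *      leaq    counter(%rip), %rax     # R_X86_64_PC32
           *
           * The RIP-relative form yields the correct address even when
           * the code executes at a virtual address other than the one
           * it was linked to run at.
           */
          return &counter;
  }

The -include linux/hidden.h in PIE_CFLAGS gives non-static symbols hidden
visibility, so references to them are likewise emitted as direct
RIP-relative accesses rather than going through a GOT.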

This is necessary to ensure that all C code that gets pulled in by the
early startup code runs correctly without the need for unpalatable hacks
in the code to force RIP-relative references.
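
The kind of hack this avoids is, roughly, manual fixup of every pointer by
the delta between the link-time and run-time addresses before relocations
have been processed. A hypothetical sketch of the pattern (not the actual
head64.c helper):

  /*
   * Before relocations are applied, code built with absolute symbol
   * references cannot trust the link-time value of &some_global, so
   * every such pointer has to be adjusted by hand.
   */
  static void *fixup_ptr(void *ptr, unsigned long load_addr,
                         unsigned long link_addr)
  {
          return (void *)((unsigned long)ptr - link_addr + load_addr);
  }

  /*
   * With PIE codegen, a plain &some_global already evaluates to the
   * correct run-time address, so call sites of the form
   *
   *      p = fixup_ptr(&some_global, load_addr, link_addr);
   *
   * reduce to
   *
   *      p = &some_global;
   */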

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/Makefile                 | 7 +++++--
 arch/x86/entry/vdso/Makefile      | 2 +-
 arch/x86/kernel/Makefile          | 4 ----
 arch/x86/realmode/rm/Makefile     | 1 +
 include/asm-generic/vmlinux.lds.h | 2 ++
 5 files changed, 9 insertions(+), 7 deletions(-)
  

Patch

diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index bed0850d91b0..0382a9534099 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -165,11 +165,13 @@  else
         KBUILD_RUSTFLAGS += $(rustflags-y)
 
         KBUILD_CFLAGS += -mno-red-zone
-        KBUILD_CFLAGS += -mcmodel=kernel
+        KBUILD_CFLAGS_MODULE += -mcmodel=kernel
         KBUILD_RUSTFLAGS += -Cno-redzone=y
-        KBUILD_RUSTFLAGS += -Ccode-model=kernel
+        KBUILD_RUSTFLAGS_KERNEL += -Ccode-model=small
+        KBUILD_RUSTFLAGS_MODULE += -Ccode-model=kernel
 
 	PIE_CFLAGS := -fpie -mcmodel=small \
+		      $(call cc-option,-Wa$(comma)-mrelax-relocations=no) \
 		      -include $(srctree)/include/linux/hidden.h
 
 	ifeq ($(CONFIG_STACKPROTECTOR),y)
@@ -178,6 +180,7 @@  else
 		endif
 	endif
 
+	KBUILD_CFLAGS_KERNEL += $(PIE_CFLAGS)
 	export PIE_CFLAGS
 endif
 
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index b1b8dd1608f7..e2c79d4c1417 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -149,7 +149,7 @@  $(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
 $(obj)/vdso32.so.dbg: asflags-$(CONFIG_X86_64) += -m32
 
 KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
-KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out -mcmodel=small,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(RANDSTRUCT_CFLAGS),$(KBUILD_CFLAGS_32))
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 65194ca79b5c..0000325ab98f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -21,10 +21,6 @@  CFLAGS_REMOVE_sev.o = -pg
 CFLAGS_REMOVE_rethook.o = -pg
 endif
 
-# head64.c contains C code that may execute from a different virtual address
-# than it was linked at, so we always build it using PIE codegen
-CFLAGS_head64.o += $(PIE_CFLAGS)
-
 KASAN_SANITIZE_head$(BITS).o				:= n
 KASAN_SANITIZE_dumpstack.o				:= n
 KASAN_SANITIZE_dumpstack_$(BITS).o			:= n
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index f614009d3e4e..fdb8e780f903 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -76,5 +76,6 @@  KBUILD_CFLAGS	:= $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
 		   -I$(srctree)/arch/x86/boot
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
 KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS_KERNEL := $(filter-out $(PIE_CFLAGS),$(KBUILD_CFLAGS_KERNEL))
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ef45331fb043..9518b87207e8 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -355,6 +355,7 @@ 
 	*(.data..decrypted)						\
 	*(.ref.data)							\
 	*(.data..shared_aligned) /* percpu related */			\
+	*(.data.rel .data.rel.*)					\
 	MEM_KEEP(init.data*)						\
 	*(.data.unlikely)						\
 	__start_once = .;						\
@@ -477,6 +478,7 @@ 
 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
 		__start_rodata = .;					\
 		*(.rodata) *(.rodata.*)					\
+		*(.data.rel.ro*)					\
 		SCHED_DATA						\
 		RO_AFTER_INIT_DATA	/* Read only after init */	\
 		. = ALIGN(8);						\