[4/4] LoongArch: Add support for kernel address space layout randomization (KASLR)
Commit Message
This patch adds support for relocating the kernel to a random address.
Entropy is derived from the banner, which will change with every build,
and from random_get_entropy(), which should provide additional runtime entropy.
The kernel is relocated by up to RANDOMIZE_BASE_MAX_OFFSET bytes from
its link address. Because relocation happens so early in the kernel boot,
the amount of physical memory has not yet been determined. This means
the only way to limit relocation within the available memory is via
Kconfig. Limit the maximum value of RANDOMIZE_BASE_MAX_OFFSET to
256M (0x10000000) because our memory layout has many holes.
KERNELOFFSET (kaslr_offset) will be added to vmcoreinfo in the future, for
crash --kaslr support.
Signed-off-by: Youling Tang <tangyouling@loongson.cn>
---
arch/loongarch/Kconfig | 22 ++++++
arch/loongarch/include/asm/page.h | 6 ++
arch/loongarch/kernel/relocate.c | 114 ++++++++++++++++++++++++++++++
arch/loongarch/kernel/setup.c | 3 +
4 files changed, 145 insertions(+)
Comments
Hi Youling,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on linus/master]
[also build test WARNING on v6.2-rc3 next-20230111]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Youling-Tang/LoongArch-Use-trampoline-for-exception-handlers-and-kill-la-abs/20230109-171344
patch link: https://lore.kernel.org/r/1673255274-18238-5-git-send-email-tangyouling%40loongson.cn
patch subject: [PATCH 4/4] LoongArch: Add support for kernel address space layout randomization (KASLR)
config: loongarch-randconfig-s032-20230110
compiler: loongarch64-linux-gcc (GCC) 12.1.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.4-39-gce1a6720-dirty
# https://github.com/intel-lab-lkp/linux/commit/66cda25683cd9f1c9b39a92e6cfe0e579b5b0722
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Youling-Tang/LoongArch-Use-trampoline-for-exception-handlers-and-kill-la-abs/20230109-171344
git checkout 66cda25683cd9f1c9b39a92e6cfe0e579b5b0722
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=loongarch olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=loongarch SHELL=/bin/bash arch/loongarch/kernel/
If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
arch/loongarch/kernel/relocate.c:175:1: sparse: sparse: unused label 'out'
>> arch/loongarch/kernel/relocate.c:127:38: sparse: sparse: incorrect type in initializer (different address spaces) @@ expected char *cmdline @@ got void [noderef] __iomem * @@
arch/loongarch/kernel/relocate.c:127:38: sparse: expected char *cmdline
arch/loongarch/kernel/relocate.c:127:38: sparse: got void [noderef] __iomem *
arch/loongarch/kernel/relocate.c:160:48: sparse: sparse: incorrect type in assignment (different base types) @@ expected unsigned long long [usertype] relocated_addr @@ got void * @@
arch/loongarch/kernel/relocate.c:160:48: sparse: expected unsigned long long [usertype] relocated_addr
arch/loongarch/kernel/relocate.c:160:48: sparse: got void *
vim +127 arch/loongarch/kernel/relocate.c
117
118 void *__init relocate_kernel(void)
119 {
120 Elf64_Rela *rela, *rela_end;
121 void *loc_new;
122 unsigned long kernel_length;
123 long offset = 0;
124 int res = 1;
125 /* Default to original kernel entry point */
126 void *kernel_entry = start_kernel;
> 127 char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE);
128
129 /* Boot command line was passed in fw_arg1 */
130 strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
131
132 kernel_length = (long)(_end) - (long)(_text);
133
134 loc_new = determine_relocation_address();
135
136 /* Sanity check relocation address */
137 if (relocation_addr_valid(loc_new))
138 offset = (unsigned long)loc_new - (unsigned long)(_text);
139
140 if (offset) {
141 /* Copy the kernel to its new location */
142 memcpy(loc_new, _text, kernel_length);
143
144 /* Sync the caches ready for execution of new kernel */
145 __asm__ __volatile__ (
146 "ibar 0 \t\n"
147 "dbar 0 \t\n");
148
149 rela = (Elf64_Rela *)RELOCATED(&__rela_dyn_start);
150 rela_end = (Elf64_Rela *)RELOCATED(&__rela_dyn_end);
151
152 for ( ; rela < rela_end; rela++) {
153 Elf64_Addr addr = rela->r_offset;
154 Elf64_Addr relocated_addr = rela->r_addend;
155
156 if (rela->r_info != R_LARCH_RELATIVE)
157 continue;
158
159 if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
160 relocated_addr = RELOCATED(relocated_addr);
161
162 *(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
163
164 }
165
166 /* The current thread is now within the relocated image */
167 __current_thread_info = RELOCATED(__current_thread_info);
168
169 /* Return the new kernel's entry point */
170 kernel_entry = RELOCATED(start_kernel);
171
172 /* Error may occur before, so keep it at last */
173 update_kaslr_offset(&__kaslr_offset, offset);
174 }
> 175 out:
176 return kernel_entry;
177 }
178
@@ -489,6 +489,28 @@ config RELOCATABLE
kernel binary at runtime to a different virtual address than the
address it was linked at.
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image (KASLR)"
+ depends on RELOCATABLE
+ help
+ Randomizes the physical and virtual address at which the
+ kernel image is loaded, as a security feature that
+ deters exploit attempts relying on knowledge of the location
+ of kernel internals.
+
+ The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
+
+ If unsure, say N.
+
+config RANDOMIZE_BASE_MAX_OFFSET
+ hex "Maximum KASLR offset" if EXPERT
+ depends on RANDOMIZE_BASE
+ range 0x0 0x10000000 if 64BIT
+ default "0x01000000"
+ help
+ When KASLR is active, this provides the maximum offset that will
+ be applied to the kernel image.
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
@@ -106,6 +106,12 @@ extern int __virt_addr_valid(volatile void *kaddr);
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+extern unsigned long __kaslr_offset;
+static inline unsigned long kaslr_offset(void)
+{
+ return __kaslr_offset;
+}
+
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
@@ -11,6 +11,7 @@
#include <linux/printk.h>
#include <linux/panic_notifier.h>
#include <asm/bootinfo.h>
+#include <asm/early_ioremap.h>
#include <asm/inst.h>
#include <asm/sections.h>
@@ -19,6 +20,70 @@
extern long __rela_dyn_start;
extern long __rela_dyn_end;
+#ifdef CONFIG_RANDOMIZE_BASE
+
+static inline __init unsigned long rotate_xor(unsigned long hash,
+ const void *area, size_t size)
+{
+ size_t i;
+ unsigned long *ptr = (unsigned long *)area;
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+static inline __init unsigned long get_random_boot(void)
+{
+ unsigned long entropy = random_get_entropy();
+ unsigned long hash = 0;
+
+ /* Attempt to create a simple but unpredictable starting entropy. */
+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+ /* Add in any runtime entropy we can get */
+ hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+ return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+ char *str;
+
+ str = strstr(boot_command_line, "nokaslr");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+
+ return false;
+}
+
+/* Choose a new address for the kernel */
+static inline void __init *determine_relocation_address(void)
+{
+ unsigned long kernel_length;
+ void *dest = _text;
+ unsigned long offset;
+
+ if (kaslr_disabled())
+ return dest;
+
+ kernel_length = (long)_end - (long)_text;
+
+ offset = get_random_boot() << 16;
+ offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+ if (offset < kernel_length)
+ offset += ALIGN(kernel_length, 0xffff);
+
+ return RELOCATED(dest);
+}
+
+#else
+
/*
* Choose a new address for the kernel, for now we'll hard
* code the destination.
@@ -28,6 +93,8 @@ static inline void __init *determine_relocation_address(void)
return (void *)(CACHE_BASE + 0x02000000);
}
+#endif
+
static inline int __init relocation_addr_valid(void *loc_new)
{
if ((unsigned long)loc_new & 0x0000ffff) {
@@ -41,6 +108,13 @@ static inline int __init relocation_addr_valid(void *loc_new)
return 1;
}
+static inline void __init update_kaslr_offset(unsigned long *addr, long offset)
+{
+ unsigned long *new_addr = (unsigned long *)RELOCATED(addr);
+
+ *new_addr = (unsigned long)offset;
+}
+
void *__init relocate_kernel(void)
{
Elf64_Rela *rela, *rela_end;
@@ -50,6 +124,10 @@ void *__init relocate_kernel(void)
int res = 1;
/* Default to original kernel entry point */
void *kernel_entry = start_kernel;
+ char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE);
+
+ /* Boot command line was passed in fw_arg1 */
+ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
kernel_length = (long)(_end) - (long)(_text);
@@ -90,7 +168,43 @@ void *__init relocate_kernel(void)
/* Return the new kernel's entry point */
kernel_entry = RELOCATED(start_kernel);
+
+ /* Error may occur before, so keep it at last */
+ update_kaslr_offset(&__kaslr_offset, offset);
}
out:
return kernel_entry;
}
+
+/*
+ * Show relocation information on panic.
+ */
+static void show_kernel_relocation(const char *level)
+{
+ if (__kaslr_offset > 0) {
+ printk(level);
+ pr_cont("Kernel relocated offset @ 0x%lx\n", __kaslr_offset);
+ pr_cont(" .text @ 0x%lx\n", (unsigned long)&_text);
+ pr_cont(" .data @ 0x%lx\n", (unsigned long)&_sdata);
+ pr_cont(" .bss @ 0x%lx\n", (unsigned long)&__bss_start);
+ }
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ show_kernel_relocation(KERN_EMERG);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+ .notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_location_notifier);
+ return 0;
+}
+__initcall(register_kernel_offset_dumper);
@@ -82,6 +82,9 @@ static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };
+unsigned long __kaslr_offset __ro_after_init;
+EXPORT_SYMBOL(__kaslr_offset);
+
const char *get_system_type(void)
{
return "generic-loongson-machine";