Debugging the code that runs in HYP is rather difficult, especially
when its placement is randomized. So let's take the 'nokaslr' command
line option into account, and allow the randomization to be disabled
at boot.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
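Note for reviewers, not part of the patch: below is a stand-alone
sketch of what the new kaslr_disabled_cmdline() helper boils down to.
It models cpuid_feature_extract_unsigned_field() as a plain 4-bit
unsigned field read; the shift value and the variable standing in for
arm64_sw_feature_override.val are made up here purely for
illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field position, for illustration only. */
#define SW_FEATURE_NOKASLR_SHIFT	0

/* Stand-in for arm64_sw_feature_override.val, latched by early param parsing. */
static uint64_t sw_feature_override_val;

/* Models cpuid_feature_extract_unsigned_field(): a 4-bit unsigned field read. */
static unsigned int extract_unsigned_field(uint64_t val, int shift)
{
	return (unsigned int)((val >> shift) & 0xf);
}

static bool kaslr_disabled_cmdline(void)
{
	return extract_unsigned_field(sw_feature_override_val,
				      SW_FEATURE_NOKASLR_SHIFT) != 0;
}

int main(void)
{
	printf("before: %d\n", kaslr_disabled_cmdline());	/* 0 */

	/* Pretend 'nokaslr' was found on the command line. */
	sw_feature_override_val |= 1ULL << SW_FEATURE_NOKASLR_SHIFT;
	printf("after:  %d\n", kaslr_disabled_cmdline());	/* 1 */
	return 0;
}

The helper returns true for any nonzero field value, which matches the
check that kaslr_init() already performs before this patch.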
arch/arm64/include/asm/memory.h | 15 -------------
arch/arm64/include/asm/mmu.h | 23 ++++++++++++++++++++
arch/arm64/kernel/kaslr.c | 3 +--
arch/arm64/kvm/va_layout.c | 3 ++-
4 files changed, 26 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -197,21 +197,6 @@ extern s64 memstart_addr;
 /* the offset between the kernel virtual and physical mappings */
 extern u64 kimage_voffset;
 
-static inline unsigned long kaslr_offset(void)
-{
-	return (u64)&_text - KIMAGE_VADDR;
-}
-
-static inline bool kaslr_enabled(void)
-{
-	/*
-	 * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
-	 * placement of the image rather than from the seed, so a displacement
-	 * of less than MIN_KIMG_ALIGN means that no seed was provided.
-	 */
-	return kaslr_offset() >= MIN_KIMG_ALIGN;
-}
-
 /*
  * Allow all memory at the discovery stage. We will clip it later.
  */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -72,6 +72,29 @@ extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
 extern bool kaslr_requires_kpti(void);
 
+static inline unsigned long kaslr_offset(void)
+{
+	return (u64)&_text - KIMAGE_VADDR;
+}
+
+static inline bool kaslr_enabled(void)
+{
+	/*
+	 * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
+	 * placement of the image rather than from the seed, so a displacement
+	 * of less than MIN_KIMG_ALIGN means that no seed was provided.
+	 */
+	return kaslr_offset() >= MIN_KIMG_ALIGN;
+}
+
+static inline bool kaslr_disabled_cmdline(void)
+{
+	if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val,
+						 ARM64_SW_FEATURE_OVERRIDE_NOKASLR))
+		return true;
+	return false;
+}
+
 #define INIT_MM_CONTEXT(name) \
 	.pgd = init_pg_dir,
 
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -34,8 +34,7 @@ static int __init kaslr_init(void)
 	 */
 	module_alloc_base = (u64)_etext - MODULES_VSIZE;
 
-	if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val,
-						 ARM64_SW_FEATURE_OVERRIDE_NOKASLR)) {
+	if (kaslr_disabled_cmdline()) {
 		pr_info("KASLR disabled on command line\n");
 		return 0;
 	}
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -72,7 +72,8 @@ __init void kvm_compute_layout(void)
 	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
 	tag_val = hyp_va_msb;
 
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1) &&
+	    !kaslr_disabled_cmdline()) {
 		/* We have some free bits to insert a random tag. */
 		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
 	}