[v3,31/60] arm64: Add helpers to probe local CPU for PAC/BTI/E0PD support

Message ID 20230307140522.2311461-32-ardb@kernel.org
State New
Series arm64: Add support for LPA2 at stage1 and WXN

Commit Message

Ard Biesheuvel March 7, 2023, 2:04 p.m. UTC
Add some helpers that will be used by the early kernel mapping code to
check feature support on the local CPU. This permits the early kernel
mapping to be created with the right attributes, removing the need to
tear it down and recreate it later.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/cpufeature.h | 56 ++++++++++++++++++++
 arch/arm64/kernel/cpufeature.c      | 12 +----
 2 files changed, 57 insertions(+), 11 deletions(-)
  

Patch

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index edc7733aa49846b2..edefe3b36fe5c243 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -933,6 +933,62 @@ extern struct arm64_ftr_override arm64_sw_feature_override;
 u32 get_kvm_ipa_limit(void);
 void dump_cpu_features(void);
 
+static inline bool cpu_has_bti(void)
+{
+	u64 pfr1;
+
+	if (!IS_ENABLED(CONFIG_ARM64_BTI))
+		return false;
+
+	pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+	pfr1 &= ~id_aa64pfr1_override.mask;
+	pfr1 |= id_aa64pfr1_override.val;
+
+	return cpuid_feature_extract_unsigned_field(pfr1,
+						    ID_AA64PFR1_EL1_BT_SHIFT);
+}
+
+static inline bool cpu_has_e0pd(void)
+{
+	u64 mmfr2;
+
+	if (!IS_ENABLED(CONFIG_ARM64_E0PD))
+		return false;
+
+	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+	return cpuid_feature_extract_unsigned_field(mmfr2,
+						    ID_AA64MMFR2_EL1_E0PD_SHIFT);
+}
+
+static inline bool cpu_has_pac(void)
+{
+	u64 isar1, isar2;
+	u8 feat;
+
+	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+		return false;
+
+	isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+	isar1 &= ~id_aa64isar1_override.mask;
+	isar1 |= id_aa64isar1_override.val;
+	feat = cpuid_feature_extract_unsigned_field(isar1,
+						    ID_AA64ISAR1_EL1_APA_SHIFT);
+	if (feat)
+		return true;
+
+	feat = cpuid_feature_extract_unsigned_field(isar1,
+						    ID_AA64ISAR1_EL1_API_SHIFT);
+	if (feat)
+		return true;
+
+	isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
+	isar2 &= ~id_aa64isar2_override.mask;
+	isar2 |= id_aa64isar2_override.val;
+	feat = cpuid_feature_extract_unsigned_field(isar2,
+						    ID_AA64ISAR2_EL1_APA3_SHIFT);
+	return feat;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0b16e676b68c6543..9838934fee028bcb 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1609,21 +1609,11 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
  */
 bool kaslr_requires_kpti(void)
 {
-	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return false;
-
 	/*
 	 * E0PD does a similar job to KPTI so can be used instead
 	 * where available.
 	 */
-	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
-		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
-		if (cpuid_feature_extract_unsigned_field(mmfr2,
-						ID_AA64MMFR2_EL1_E0PD_SHIFT))
-			return false;
-	}
-
-	return kaslr_enabled();
+	return kaslr_enabled() && !cpu_has_e0pd();
 }
 
 static bool __meltdown_safe = true;
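
The mask/val sequence that cpu_has_bti() and cpu_has_pac() repeat applies
any early command-line override of the ID register before the field is
extracted. Factored out, the pattern looks like the sketch below; the
helper name is hypothetical (the patch deliberately open-codes it), while
struct arm64_ftr_override is the real kernel type. Note that cpu_has_e0pd()
reads ID_AA64MMFR2_EL1 directly, with no override applied.

/*
 * Hypothetical helper, for illustration only: apply a feature
 * register override (e.g. id_aa64pfr1_override, populated from the
 * early command line) to a raw ID register value. The helpers in
 * this patch open-code this pattern.
 */
static inline u64 arm64_apply_ftr_override(u64 reg,
					   const struct arm64_ftr_override *ovr)
{
	reg &= ~ovr->mask;	/* clear each overridden field */
	reg |= ovr->val;	/* insert the override values */
	return reg;
}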
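
For context, a minimal sketch of how the early mapping code later in this
series might consult these helpers when creating the initial kernel
mapping. The function below and its use of PAGE_KERNEL_ROX are illustrative
assumptions, not code from this series; PTE_GP is the real arm64
guarded-page attribute that BTI enforcement relies on.

/*
 * Illustrative sketch only, not taken from this series: choose the
 * attributes for the early kernel text mapping based on what the
 * local CPU supports, so the mapping never has to be torn down and
 * recreated once the cpufeature code has run.
 */
static pgprot_t early_text_prot(void)
{
	pgprot_t prot = PAGE_KERNEL_ROX;

	/* Map kernel text as guarded pages when BTI is usable */
	if (cpu_has_bti())
		prot = __pgprot(pgprot_val(prot) | PTE_GP);

	return prot;
}

Because cpu_has_bti() folds in the id_aa64pfr1 override, passing
arm64.nobti on the command line is respected even this early in boot.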