[v1,RFC,Zisslpcfi,04/20] riscv: kernel enabling user code for shadow stack and landing pad

Message ID 20230213045351.3945824-5-debug@rivosinc.com
State New
Headers
Series riscv control-flow integrity for U mode |

Commit Message

Deepak Gupta Feb. 13, 2023, 4:53 a.m. UTC
  Enables architectural support for shadow stack and landing pad instructions
for user mode on riscv.

This patch does the following:
- Defines a new structure cfi_status
- Includes cfi_status in thread_info
- Defines offsets to new member fields in thread_info in asm-offsets.c
- Saves and restores CFI state on trap entry (U --> S) and exit (S --> U)

Signed-off-by: Deepak Gupta <debug@rivosinc.com>
---
 arch/riscv/include/asm/processor.h   | 11 ++++++++
 arch/riscv/include/asm/thread_info.h |  5 ++++
 arch/riscv/kernel/asm-offsets.c      |  5 ++++
 arch/riscv/kernel/entry.S            | 40 ++++++++++++++++++++++++++++
 4 files changed, 61 insertions(+)
  

Patch

diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index bdebce2cc323..f065309927b1 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -41,6 +41,17 @@  struct thread_struct {
 	unsigned long bad_cause;
 };
 
+#if defined(CONFIG_USER_SHADOW_STACK) || defined(CONFIG_USER_INDIRECT_BR_LP)
+struct cfi_status {
+	unsigned int ufcfi_en : 1; /* Enable for forward cfi. Note that ELP goes in sstatus */
+	unsigned int ubcfi_en : 1; /* Enable for backward cfi. */
+	unsigned int rsvd1 : 30;
+	unsigned int lp_label; /* saved label value (25bit) */
+	long user_shdw_stk; /* Current user shadow stack pointer */
+	long shdw_stk_base; /* Base address of shadow stack */
+};
+#endif
+
 /* Whitelist the fstate from the task_struct for hardened usercopy */
 static inline void arch_thread_struct_whitelist(unsigned long *offset,
 						unsigned long *size)
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 67322f878e0d..f74b8bd55d5b 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -65,6 +65,11 @@  struct thread_info {
 	 */
 	long			kernel_sp;	/* Kernel stack pointer */
 	long			user_sp;	/* User stack pointer */
+#if defined(CONFIG_USER_SHADOW_STACK) || defined(CONFIG_USER_INDIRECT_BR_LP)
+	/* cfi_state only if config is defined */
+	/* state of user cfi state. note this includes LPLR and SSP as well */
+	struct cfi_status       user_cfi_state;
+#endif
 	int			cpu;
 };
 
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index df9444397908..340e6413cf3c 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -38,6 +38,11 @@  void asm_offsets(void)
 	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
 	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
 
+#if defined(CONFIG_USER_SHADOW_STACK) || defined(CONFIG_USER_INDIRECT_BR_LP)
+	OFFSET(TASK_TI_USER_CFI_STATUS, task_struct, thread_info.user_cfi_state);
+	OFFSET(TASK_TI_USER_LPLR, task_struct, thread_info.user_cfi_state.lp_label);
+	OFFSET(TASK_TI_USER_SSP, task_struct, thread_info.user_cfi_state.user_shdw_stk);
+#endif
 	OFFSET(TASK_THREAD_F0,  task_struct, thread.fstate.f[0]);
 	OFFSET(TASK_THREAD_F1,  task_struct, thread.fstate.f[1]);
 	OFFSET(TASK_THREAD_F2,  task_struct, thread.fstate.f[2]);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 99d38fdf8b18..f283130c81ec 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -73,6 +73,31 @@  _save_context:
 	REG_S x30, PT_T5(sp)
 	REG_S x31, PT_T6(sp)
 
+#if	defined(CONFIG_USER_SHADOW_STACK) || defined(CONFIG_USER_INDIRECT_BR_LP)
+	/*
+	 * If U --> S, CSR_SCRATCH should be holding U TP.
+	 * If S --> S, CSR_SCRATCH should be holding S TP.
+	 * s2 == tp means the previous mode was S,
+	 * otherwise the previous mode was U.
+	 * We need to save CFI state only when the previous mode was U.
+	 */
+	csrr s2, CSR_SCRATCH
+	xor s2, s2, tp
+	beqz s2, skip_bcfi_save
+	/* load cfi status word */
+	lw s2, TASK_TI_USER_CFI_STATUS(tp)
+	andi s3, s2, 1
+	beqz s3, skip_fcfi_save
+	/* fcfi is enabled, capture ELP and LPLR state and record it */
+	csrr s3, CSR_LPLR /* record label register */
+	sw s3, TASK_TI_USER_LPLR(tp) /* save it back in thread_info structure */
+skip_fcfi_save:
+	andi s3, s2, 2
+	beqz s3, skip_bcfi_save
+	csrr s3, CSR_SSP
+	REG_S s3, TASK_TI_USER_SSP(tp) /* save user ssp in thread_info */
+skip_bcfi_save:
+#endif
 	/*
 	 * Disable user-mode memory access as it should only be set in the
 	 * actual user copy routines.
@@ -283,6 +308,21 @@  resume_userspace:
 	 */
 	csrw CSR_SCRATCH, tp
 
+#if	defined(CONFIG_USER_SHADOW_STACK) || defined(CONFIG_USER_INDIRECT_BR_LP)
+	lw s2, TASK_TI_USER_CFI_STATUS(tp)
+	andi s3, s2, 1
+	beqz s3, skip_fcfi_resume
+	xor s3, s3, s3
+	lw s3, TASK_TI_USER_LPLR(tp)
+	csrw CSR_LPLR, s3
+skip_fcfi_resume:
+	andi s3, s2, 2
+	beqz s3, skip_bcfi_resume
+	REG_L s3, TASK_TI_USER_SSP(tp) /* restore user ssp from thread_info */
+	csrw CSR_SSP, s3
+skip_bcfi_resume:
+#endif
+
 restore_all:
 #ifdef CONFIG_TRACE_IRQFLAGS
 	REG_L s1, PT_STATUS(sp)