This commit switches the way of disabling irqs to CSR_IE masking.
Now that CSR_IE has been made part of the saved context, we can
safely switch to CSR_IE masking when disabling irqs.
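
A rough usage-level sketch of the resulting semantics (illustrative
only, assuming irqs_enabled_ie holds the IE bits of normal, maskable
interrupts as introduced earlier in this series):

    unsigned long flags;

    local_irq_save(flags);      /* clears only irqs_enabled_ie in CSR_IE */
    /*
     * SR_IE is still set here, so a source whose enable bit is not
     * covered by irqs_enabled_ie (a pseudo NMI) can still be taken,
     * while all normal interrupts stay masked.
     */
    local_irq_restore(flags);   /* writes the saved IE mask back to CSR_IE */
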
Signed-off-by: Xu Lu <luxu.kernel@bytedance.com>
Signed-off-by: Hangjing Li <lihangjing@bytedance.com>
Reviewed-by: Liang Deng <dengliang.1214@bytedance.com>
Reviewed-by: Yu Li <liyu.yukiteru@bytedance.com>
---
arch/riscv/include/asm/irqflags.h | 58 +++++++++++++++++++++++++++++++
arch/riscv/include/asm/ptrace.h | 4 +++
arch/riscv/kernel/entry.S | 7 +++-
arch/riscv/kernel/head.S | 10 ++++++
4 files changed, 78 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -10,6 +10,62 @@
#include <asm/processor.h>
#include <asm/csr.h>
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+
+static inline void local_irq_switch_on(void)
+{
+ csr_set(CSR_STATUS, SR_IE);
+}
+
+static inline void local_irq_switch_off(void)
+{
+ csr_clear(CSR_STATUS, SR_IE);
+}
+
+/* read interrupt enabled status */
+static inline unsigned long arch_local_save_flags(void)
+{
+ return csr_read(CSR_IE);
+}
+
+/* unconditionally enable interrupts */
+static inline void arch_local_irq_enable(void)
+{
+ csr_set(CSR_IE, irqs_enabled_ie);
+}
+
+/* unconditionally disable interrupts */
+static inline void arch_local_irq_disable(void)
+{
+ csr_clear(CSR_IE, irqs_enabled_ie);
+}
+
+/* get status and disable interrupts */
+static inline unsigned long arch_local_irq_save(void)
+{
+ return csr_read_clear(CSR_IE, irqs_enabled_ie);
+}
+
+/* test flags */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags != irqs_enabled_ie);
+}
+
+/* test hardware interrupt enable bit */
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+/* set interrupt enabled status */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ csr_write(CSR_IE, flags);
+}
+
+#else /* CONFIG_RISCV_PSEUDO_NMI */
+
/* read interrupt enabled status */
static inline unsigned long arch_local_save_flags(void)
{
@@ -52,4 +108,6 @@ static inline void arch_local_irq_restore(unsigned long flags)
csr_set(CSR_STATUS, flags & SR_IE);
}
+#endif /* !CONFIG_RISCV_PSEUDO_NMI */
+
#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -178,7 +178,11 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
static inline int regs_irqs_disabled(struct pt_regs *regs)
{
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ return (regs->ie != irqs_enabled_ie);
+#else
return !(regs->status & SR_PIE);
+#endif
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -66,8 +66,13 @@ _save_context:
REG_S s4, PT_CAUSE(sp)
REG_S s5, PT_TP(sp)
#ifdef CONFIG_RISCV_PSEUDO_NMI
- csrr s0, CSR_IE
+	csrrw s0, CSR_IE, x0		/* save irq enable mask, mask all irqs in CSR_IE */
 	REG_S s0, PT_IE(sp)
+	andi s1, s1, SR_PIE		/* s1 still holds the trapped CSR_STATUS */
+	beqz s1, 1f
+	li s1, SR_IE
+	csrs CSR_STATUS, s1		/* restore SR_IE so pseudo NMIs stay deliverable */
+1:
#endif /* CONFIG_RISCV_PSEUDO_NMI */
/*
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -169,6 +169,10 @@ secondary_start_sbi:
call relocate_enable_mmu
#endif
call setup_trap_vector
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ li t0, SR_IE
+	csrs CSR_STATUS, t0		/* irqs are masked via CSR_IE; keep SR_IE set */
+#endif
tail smp_callin
#endif /* CONFIG_SMP */
@@ -320,6 +324,12 @@ clear_bss_done:
#ifdef CONFIG_KASAN
call kasan_early_init
#endif
+
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ li t0, SR_IE
+	csrs CSR_STATUS, t0		/* irqs are masked via CSR_IE; keep SR_IE set */
+#endif
+
/* Start the kernel */
call soc_early_init
tail start_kernel