@@ -7,6 +7,7 @@
#define _ASM_RISCV_CSR_H

#include <asm/asm.h>
+#include <asm/hwcap.h>
#include <linux/bits.h>

/* Status register flags */
@@ -451,6 +452,22 @@
#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)

+#ifdef CONFIG_RISCV_PSEUDO_NMI
+#define IRQS_ENABLED_IE (IE_SIE | IE_TIE | IE_EIE)
+#define irqs_enabled_ie \
+({ \
+ unsigned long __v; \
+ asm (ALTERNATIVE( \
+ "li %0, " __stringify(IRQS_ENABLED_IE) "\n\t" \
+ "nop", \
+ "li %0, " __stringify(IRQS_ENABLED_IE | SIP_LCOFIP),\
+ 0, RISCV_ISA_EXT_SSCOFPMF, \
+ CONFIG_RISCV_PSEUDO_NMI) \
+ : "=r"(__v) : : ); \
+ __v; \
+})
+#endif /* CONFIG_RISCV_PSEUDO_NMI */
+
#ifndef __ASSEMBLY__

#define csr_swap(csr, val) \
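A note on the hunk above: ALTERNATIVE() patches the instruction sequence at boot. On cores that advertise Sscofpmf, the load also picks up SIP_LCOFIP, so the PMU overflow interrupt stays in the "enabled" mask and can serve as the NMI source. The nop pads the default side: with the standard S-mode interrupt numbers (soft = 1, timer = 5, external = 9) the default mask is 0x222, which li encodes as a single addi, while 0x2222 (overflow bit 13 added) exceeds the 12-bit immediate and expands to lui + addi. A plain-C equivalent of what the patched sequence computes, as a minimal sketch (riscv_isa_extension_available() is the existing hwcap query; the helper name is invented for illustration):

#include <asm/csr.h>
#include <asm/hwcap.h>

/* Sketch only: same result as irqs_enabled_ie, without the boot-time
 * code patching that makes the real macro cheap on hot paths. */
static unsigned long irqs_enabled_ie_slow(void)
{
	unsigned long v = IE_SIE | IE_TIE | IE_EIE;	/* 0x222 */

	/* With Sscofpmf, keep the PMU overflow irq (the NMI) enabled. */
	if (riscv_isa_extension_available(NULL, SSCOFPMF))
		v |= SIP_LCOFIP;			/* 0x2222 */

	return v;
}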
@@ -47,6 +47,9 @@ struct pt_regs {
unsigned long t6;
/* Supervisor/Machine CSRs */
unsigned long status;
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ unsigned long ie;
+#endif
unsigned long badaddr;
unsigned long cause;
/* a0 value before the syscall */
@@ -112,6 +112,9 @@ void asm_offsets(void)
OFFSET(PT_GP, pt_regs, gp);
OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
OFFSET(PT_STATUS, pt_regs, status);
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ OFFSET(PT_IE, pt_regs, ie);
+#endif
OFFSET(PT_BADADDR, pt_regs, badaddr);
OFFSET(PT_CAUSE, pt_regs, cause);
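PT_IE exists so the trap-entry assembly can address the new pt_regs field: asm-offsets.c turns each OFFSET() entry into a build-time constant equal to offsetof(struct pt_regs, ie). A minimal sketch of what the generated constant means at the C level (save_ie() is invented for illustration; the real consumer is the REG_S in the next hunk):

#include <asm/ptrace.h>

/* "REG_S s0, PT_IE(sp)" with sp pointing at the saved pt_regs is
 * equivalent to this store; PT_IE == offsetof(struct pt_regs, ie). */
static void save_ie(struct pt_regs *regs, unsigned long ie_val)
{
	regs->ie = ie_val;
}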
@@ -65,6 +65,10 @@ _save_context:
REG_S s3, PT_BADADDR(sp)
REG_S s4, PT_CAUSE(sp)
REG_S s5, PT_TP(sp)
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ csrr s0, CSR_IE
+ REG_S s0, PT_IE(sp)
+#endif /* CONFIG_RISCV_PSEUDO_NMI */

/*
* Set the scratch register to 0, so that if a recursive exception
@@ -153,6 +157,11 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
csrw CSR_STATUS, a0
csrw CSR_EPC, a2

+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ REG_L s0, PT_IE(sp)
+ csrw CSR_IE, s0
+#endif /* CONFIG_RISCV_PSEUDO_NMI */
+
REG_L x1, PT_RA(sp)
REG_L x3, PT_GP(sp)
REG_L x4, PT_TP(sp)
@@ -251,6 +260,10 @@ restore_caller_reg:
REG_S s3, PT_BADADDR(sp)
REG_S s4, PT_CAUSE(sp)
REG_S s5, PT_TP(sp)
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ csrr s0, CSR_IE
+ REG_S s0, PT_IE(sp)
+#endif /* CONFIG_RISCV_PSEUDO_NMI */
move a0, sp
tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
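With the three entry.S hunks above, CSR_IE becomes part of the trap context: it is captured into pt_regs at exception entry (including the stack-overflow path) and written back just before the trap returns. This is the core of the pseudo-NMI scheme: because every trap return reloads CSR_IE from pt_regs, the register can carry a per-context interrupt-enable mask. A hedged C rendering of the flow (function names invented; the authoritative code is the assembly above):

/* Pseudo-code for the CSR_IE handling added to entry.S. */
static void trap_entry(struct pt_regs *regs)
{
	regs->ie = csr_read(CSR_IE);	/* snapshot the enable mask */
}

static void trap_exit(struct pt_regs *regs)
{
	csr_write(CSR_IE, regs->ie);	/* reinstate it before sret */
}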
@@ -115,6 +115,9 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
regs->status = SR_PIE;
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ regs->ie = irqs_enabled_ie;
+#endif
if (has_fpu()) {
regs->status |= SR_FS_INITIAL;
/*
@@ -189,6 +192,9 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs->gp = gp_in_global;
/* Supervisor/Machine, irqs on: */
childregs->status = SR_PP | SR_PIE;
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+ childregs->ie = irqs_enabled_ie;
+#endif

p->thread.s[0] = (unsigned long)args->fn;
p->thread.s[1] = (unsigned long)args->fn_arg;
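Both thread-creation paths seed pt_regs->ie with the fully-enabled mask, so a new user thread (start_thread) or kernel thread (copy_thread) starts with interrupts logically on; otherwise the first trap return would load a zero mask and silently disable every interrupt for the new context. A sketch of the invariant both call sites establish (seed_irq_state() is invented; the patch open-codes it):

static void seed_irq_state(struct pt_regs *regs, unsigned long status)
{
	regs->status = status;		/* SR_PIE, plus SR_PP for kthreads */
	regs->ie = irqs_enabled_ie;	/* reloaded by ret_from_exception */
}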
@@ -47,6 +47,7 @@ ENTRY(__cpu_suspend_enter)
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
csrr t0, CSR_STATUS
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+ /* There is no need to save CSR_IE as it is maintained in memory */
csrr t0, CSR_TVAL
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
csrr t0, CSR_CAUSE
@@ -114,7 +114,9 @@ static int clint_clock_next_event(unsigned long delta,
void __iomem *r = clint_timer_cmp +
cpuid_to_hartid_map(smp_processor_id());

+#ifndef CONFIG_RISCV_PSEUDO_NMI
csr_set(CSR_IE, IE_TIE);
+#endif
writeq_relaxed(clint_get_cycles64() + delta, r);
return 0;
}
@@ -155,7 +157,9 @@ static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);

+#ifndef CONFIG_RISCV_PSEUDO_NMI
csr_clear(CSR_IE, IE_TIE);
+#endif
evdev->event_handler(evdev);

return IRQ_HANDLED;
@@ -36,7 +36,9 @@ static int riscv_clock_next_event(unsigned long delta,
{
u64 next_tval = get_cycles64() + delta;

+#ifndef CONFIG_RISCV_PSEUDO_NMI
csr_set(CSR_IE, IE_TIE);
+#endif
if (static_branch_likely(&riscv_sstc_available)) {
#if defined(CONFIG_32BIT)
csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
@@ -119,7 +121,9 @@ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);

+#ifndef CONFIG_RISCV_PSEUDO_NMI
csr_clear(CSR_IE, IE_TIE);
+#endif
evdev->event_handler(evdev);

return IRQ_HANDLED;
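The CLINT and SBI/Sstc timer drivers stop toggling IE_TIE when pseudo-NMI is enabled. The reason follows from the entry.S changes: CSR_IE is now per-context state restored from pt_regs on every trap return, and (assuming the rest of the series implements the irqflags helpers via CSR_IE) it is also how local_irq_disable() masks interrupts, so a direct csr_set() from a driver would re-enable interrupts inside critical sections and be undone at the next trap return anyway. A sketch of the hazard these #ifndef guards avoid:

/* Illustration of why drivers must leave CSR_IE alone when
 * CONFIG_RISCV_PSEUDO_NMI=y (assumes irqflags are CSR_IE-based). */
static void broken_timer_example(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* clears the enable bits in CSR_IE */
	csr_set(CSR_IE, IE_TIE);	/* BUG: timer irq live inside the
					 * critical section */
	local_irq_restore(flags);
}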
@@ -39,12 +39,16 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)

static void riscv_intc_irq_mask(struct irq_data *d)
{
+#ifndef CONFIG_RISCV_PSEUDO_NMI
csr_clear(CSR_IE, BIT(d->hwirq));
+#endif
}

static void riscv_intc_irq_unmask(struct irq_data *d)
{
+#ifndef CONFIG_RISCV_PSEUDO_NMI
csr_set(CSR_IE, BIT(d->hwirq));
+#endif
}

static void riscv_intc_irq_eoi(struct irq_data *d)
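riscv_intc_irq_mask()/riscv_intc_irq_unmask() become no-ops for the same reason: a bit cleared here would reappear as soon as ret_from_exception restores pt_regs->ie, so per-hwirq masking through CSR_IE cannot stick. First-level lines simply stay enabled in the irqs_enabled_ie mask, and masking is done by swapping that mask via the irqflags code rather than by poking the irqchip. What the preprocessor leaves behind with CONFIG_RISCV_PSEUDO_NMI=y (illustration):

static void riscv_intc_irq_mask(struct irq_data *d) { }
static void riscv_intc_irq_unmask(struct irq_data *d) { }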
@@ -778,7 +778,9 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (riscv_pmu_use_irq) {
cpu_hw_evt->irq = riscv_pmu_irq;
csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
+#ifndef CONFIG_RISCV_PSEUDO_NMI
csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
+#endif
enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
}

@@ -789,7 +791,9 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
if (riscv_pmu_use_irq) {
disable_percpu_irq(riscv_pmu_irq);
+#ifndef CONFIG_RISCV_PSEUDO_NMI
csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
+#endif
}

/* Disable all counters access for user mode now */
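The PMU overflow interrupt is the one line that must stay enabled even while normal interrupts are "off"; that is what makes it usable as an NMI, and it is why irqs_enabled_ie ORs in SIP_LCOFIP on Sscofpmf systems while the per-CPU set/clear here is compiled out. Presumably the rest of the series moves the handler onto the genirq NMI interfaces; the calls below are the stock kernel API, shown only as a sketch of how that registration could look (pmu_sbi_ovf_handler, riscv_pmu_irq, and pmu->hw_events are the driver's existing names; the helper is invented):

/* Hypothetical NMI registration, not part of this diff: */
static int pmu_sbi_setup_nmi(struct riscv_pmu *pmu)
{
	int err;

	err = request_percpu_nmi(riscv_pmu_irq, pmu_sbi_ovf_handler,
				 "riscv-pmu", pmu->hw_events);
	if (err)
		return err;

	/* Per-CPU arming then happens from the hotplug callbacks. */
	enable_percpu_nmi(riscv_pmu_irq, IRQ_TYPE_NONE);
	return 0;
}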