[v8,01/10] arm64: Add framework to turn IPI as NMI
From: Sumit Garg <sumit.garg@linaro.org>
Introduce a framework to turn an IPI into an NMI using pseudo NMIs. The
main motivation for this feature is to have an IPI that can be leveraged
to invoke NMI functions on other CPUs.

The current prospective users are NMI backtrace and the KGDB CPU
round-up, whose support is added via future patches.
Signed-off-by: Sumit Garg <sumit.garg@linaro.org>
Reviewed-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Tested-by: Chen-Yu Tsai <wens@csie.org>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
---
Changes in v8:
- dynamic_ipi_setup() and dynamic_ipi_teardown() no longer take cpu param
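
For illustration only (not part of this patch): a prospective user such as
the KGDB round-up could drive the new calls roughly as below. The helper
name is made up; only arm64_supports_nmi() and arm64_send_nmi() come from
this series, and the caller is assumed to run with preemption disabled.

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    #include <asm/nmi.h>

    /* Hypothetical caller: NMI every online CPU except the current one. */
    static void example_kick_other_cpus(void)
    {
            cpumask_t targets;

            cpumask_copy(&targets, cpu_online_mask);
            cpumask_clear_cpu(smp_processor_id(), &targets);

            /* Only usable once an IPI was handed over via set_smp_dynamic_ipi(). */
            if (arm64_supports_nmi())
                    arm64_send_nmi(&targets);
            /* else: fall back to a regular, maskable IPI. */
    }
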
arch/arm64/include/asm/nmi.h | 17 ++++++++++
arch/arm64/kernel/Makefile | 2 +-
arch/arm64/kernel/ipi_nmi.c | 65 ++++++++++++++++++++++++++++++++++++
3 files changed, 83 insertions(+), 1 deletion(-)
create mode 100644 arch/arm64/include/asm/nmi.h
create mode 100644 arch/arm64/kernel/ipi_nmi.c
diff --git a/arch/arm64/include/asm/nmi.h b/arch/arm64/include/asm/nmi.h
new file mode 100644
--- /dev/null
+++ b/arch/arm64/include/asm/nmi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_NMI_H
+#define __ASM_NMI_H
+
+#ifndef __ASSEMBLER__
+
+#include <linux/cpumask.h>
+
+extern bool arm64_supports_nmi(void);
+extern void arm64_send_nmi(cpumask_t *mask);
+
+void set_smp_dynamic_ipi(int ipi);
+void dynamic_ipi_setup(void);
+void dynamic_ipi_teardown(void);
+
+#endif /* !__ASSEMBLER__ */
+#endif
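
Purely illustrative (the commit message defers the real wiring to later
patches in the series): boot and CPU hotplug code could consume the
declarations above along these lines; every function name below is
hypothetical.

    #include <linux/init.h>

    #include <asm/nmi.h>

    /* Boot: hand a spare dynamically allocated IPI over to the NMI framework. */
    static void __init example_register_nmi_ipi(int spare_ipi_irq)
    {
            set_smp_dynamic_ipi(spare_ipi_irq);
    }

    /* CPU bring-up path: prepare and enable the NMI-backed IPI on this CPU. */
    static void example_cpu_online(void)
    {
            dynamic_ipi_setup();
    }

    /* CPU hot-unplug path: disable and release it again. */
    static void example_cpu_offline(void)
    {
            dynamic_ipi_teardown();
    }
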
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -34,7 +34,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
syscall.o proton-pack.o idreg-override.o idle.o \
- patching.o
+ patching.o ipi_nmi.o
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
diff --git a/arch/arm64/kernel/ipi_nmi.c b/arch/arm64/kernel/ipi_nmi.c
new file mode 100644
--- /dev/null
+++ b/arch/arm64/kernel/ipi_nmi.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NMI support for IPIs
+ *
+ * Copyright (C) 2020 Linaro Limited
+ * Author: Sumit Garg <sumit.garg@linaro.org>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+
+#include <asm/nmi.h>
+
+static struct irq_desc *ipi_nmi_desc __read_mostly;
+static int ipi_nmi_id __read_mostly;
+
+bool arm64_supports_nmi(void)
+{
+ if (ipi_nmi_desc)
+ return true;
+
+ return false;
+}
+
+void arm64_send_nmi(cpumask_t *mask)
+{
+ if (WARN_ON_ONCE(!ipi_nmi_desc))
+ return;
+
+ __ipi_send_mask(ipi_nmi_desc, mask);
+}
+
+static irqreturn_t ipi_nmi_handler(int irq, void *data)
+{
+ /* nop, NMI handlers for special features can be added here. */
+
+ return IRQ_NONE;
+}
+
+void dynamic_ipi_setup(void)
+{
+ if (!ipi_nmi_desc)
+ return;
+
+ if (!prepare_percpu_nmi(ipi_nmi_id))
+ enable_percpu_nmi(ipi_nmi_id, IRQ_TYPE_NONE);
+}
+
+void dynamic_ipi_teardown(void)
+{
+ if (!ipi_nmi_desc)
+ return;
+
+ disable_percpu_nmi(ipi_nmi_id);
+ teardown_percpu_nmi(ipi_nmi_id);
+}
+
+void __init set_smp_dynamic_ipi(int ipi)
+{
+ if (!request_percpu_nmi(ipi, ipi_nmi_handler, "IPI", &cpu_number)) {
+ ipi_nmi_desc = irq_to_desc(ipi);
+ ipi_nmi_id = ipi;
+ }
+}
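
For completeness, an illustrative sketch of how a future user such as NMI
backtrace might extend the nop handler above; the function below is
hypothetical and not part of this patch.

    #include <linux/interrupt.h>
    #include <linux/nmi.h>

    #include <asm/irq_regs.h>

    /* Sketch of an extended handler; the handler in this patch stays a nop. */
    static irqreturn_t example_ipi_nmi_handler(int irq, void *data)
    {
            irqreturn_t ret = IRQ_NONE;

            /* Dump this CPU's backtrace if a backtrace was requested for it. */
            if (nmi_cpu_backtrace(get_irq_regs()))
                    ret = IRQ_HANDLED;

            return ret;
    }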