[v8,10/10] arm64: ipi_nmi: Fall back to a regular IPI if NMI isn't enabled
The current ipi_nmi implementation relies on arm64 pseudo-NMI
support. This needs to be enabled both in the kernel config, with
CONFIG_ARM64_PSEUDO_NMI, and on the kernel command line, with
"irqchip.gicv3_pseudo_nmi=1".
Let's fall back to a regular IPI if the NMI isn't enabled. This
matches what arm32 does all the time, since there is no NMI there.
The reason for doing this is to make the trigger_all_cpu_backtrace()
class of functions work. While those functions all return a bool so
that the caller can try a fallback on failure, an inspection of the
callers shows that nearly nobody actually implements one. It's better
to at least provide something here.
Signed-off-by: Douglas Anderson <dianders@chromium.org>
---
I dunno what people think of this patch. If it's great, we could
actually drop some of the patches out of this series since some of
them are to account for the fact that we might not be able to register
an "ipi_nmi". If it's awful, it could simply be dropped.
Changes in v8:
- "Fallback to a regular IPI if NMI isn't enabled" new for v8
arch/arm64/kernel/ipi_nmi.c | 31 +++++++++++++++++++++++++------
1 file changed, 25 insertions(+), 6 deletions(-)
@@ -16,6 +16,7 @@
 
 static struct irq_desc *ipi_nmi_desc __read_mostly;
 static int ipi_nmi_id __read_mostly;
+static bool is_nmi;
 
 bool arm64_supports_nmi(void)
 {
@@ -62,8 +63,12 @@ void dynamic_ipi_setup(void)
 	if (!ipi_nmi_desc)
 		return;
 
-	if (!prepare_percpu_nmi(ipi_nmi_id))
-		enable_percpu_nmi(ipi_nmi_id, IRQ_TYPE_NONE);
+	if (is_nmi) {
+		if (!prepare_percpu_nmi(ipi_nmi_id))
+			enable_percpu_nmi(ipi_nmi_id, IRQ_TYPE_NONE);
+	} else {
+		enable_percpu_irq(ipi_nmi_id, IRQ_TYPE_NONE);
+	}
 }
 
 void dynamic_ipi_teardown(void)
@@ -71,14 +76,28 @@ void dynamic_ipi_teardown(void)
 	if (!ipi_nmi_desc)
 		return;
 
-	disable_percpu_nmi(ipi_nmi_id);
-	teardown_percpu_nmi(ipi_nmi_id);
+	if (is_nmi) {
+		disable_percpu_nmi(ipi_nmi_id);
+		teardown_percpu_nmi(ipi_nmi_id);
+	} else {
+		disable_percpu_irq(ipi_nmi_id);
+	}
 }
 
 void __init set_smp_dynamic_ipi(int ipi)
 {
+	int err;
+
 	if (!request_percpu_nmi(ipi, ipi_nmi_handler, "IPI", &cpu_number)) {
-		ipi_nmi_desc = irq_to_desc(ipi);
-		ipi_nmi_id = ipi;
+		is_nmi = true;
+	} else {
+		err = request_percpu_irq(ipi, ipi_nmi_handler, "IPI", &cpu_number);
+		if (WARN_ON(err))
+			return;
+
+		irq_set_status_flags(ipi, IRQ_HIDDEN);
 	}
+
+	ipi_nmi_desc = irq_to_desc(ipi);
+	ipi_nmi_id = ipi;
 }
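
One more note for reviewers, since the genirq usage is a bit
asymmetric: request_percpu_nmi()/request_percpu_irq() are one-time
global registrations, but the NMI variant additionally needs
prepare_percpu_nmi() + enable_percpu_nmi() run on every CPU that
should receive it, whereas the plain per-CPU IRQ only needs
enable_percpu_irq(). That asymmetry is why the patch remembers which
variant it managed to request in is_nmi and branches on it in the
per-CPU setup/teardown paths. The IRQ_HIDDEN flag on the fallback leg
keeps the IPI out of /proc/interrupts, the same way the other arm64
IPIs are treated. A standalone sketch of that contract (the IRQ
number, handler and cookie below are made up; the genirq calls are the
real API):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_cookie);
static bool demo_is_nmi;

static irqreturn_t demo_ipi_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* Once, at boot: try the NMI variant, fall back to a plain per-CPU IRQ. */
static int __init demo_request(int irq)
{
	if (!request_percpu_nmi(irq, demo_ipi_handler, "demo", &demo_cookie)) {
		demo_is_nmi = true;
		return 0;
	}
	return request_percpu_irq(irq, demo_ipi_handler, "demo", &demo_cookie);
}

/* On each CPU that should receive it; must run on that CPU. */
static void demo_enable_this_cpu(int irq)
{
	if (demo_is_nmi) {
		if (!prepare_percpu_nmi(irq))
			enable_percpu_nmi(irq, IRQ_TYPE_NONE);
	} else {
		enable_percpu_irq(irq, IRQ_TYPE_NONE);
	}
}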