@@ -5,10 +5,10 @@
#ifdef CONFIG_PPC_WATCHDOG
extern void arch_touch_nmi_watchdog(void);
long soft_nmi_interrupt(struct pt_regs *regs);
-void watchdog_nmi_set_timeout_pct(u64 pct);
+void watchdog_hardlockup_set_timeout_pct(u64 pct);
#else
static inline void arch_touch_nmi_watchdog(void) {}
-static inline void watchdog_nmi_set_timeout_pct(u64 pct) {}
+static inline void watchdog_hardlockup_set_timeout_pct(u64 pct) {}
#endif
#ifdef CONFIG_NMI_IPI
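A minimal illustration (not part of the patch) of why the empty static inline stub above is kept in step with the real prototype: callers can use the renamed helper unconditionally, and the call compiles away when CONFIG_PPC_WATCHDOG is not set — exactly how the pseries mobility hunk further down uses it.

    /* hypothetical caller; no #ifdef CONFIG_PPC_WATCHDOG needed */
    watchdog_hardlockup_set_timeout_pct(factor);    /* stretch the timeout */
    /* ... window in which longer stalls are expected ... */
    watchdog_hardlockup_set_timeout_pct(0);         /* back to the default */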
@@ -438,7 +438,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
int cpu = smp_processor_id();
- if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ if (!(watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED))
return HRTIMER_NORESTART;
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
@@ -479,7 +479,7 @@ static void start_watchdog(void *arg)
return;
}
- if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ if (!(watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED))
return;
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
@@ -546,7 +546,7 @@ static void watchdog_calc_timeouts(void)
wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}
-void watchdog_nmi_stop(void)
+void watchdog_hardlockup_stop(void)
{
int cpu;
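For context on the heartbeat period visible in the hunk above, a worked example (illustrative, not part of the patch) with the default watchdog_thresh of 10 seconds:

    wd_timer_period_ms = 10 * 1000 * 2 / 5;     /* = 4000 ms between heartbeats */

so each per-CPU heartbeat timer fires every 4 seconds, i.e. at least twice within the 10-second hardlockup window.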
@@ -554,7 +554,7 @@ void watchdog_nmi_stop(void)
stop_watchdog_on_cpu(cpu);
}
-void watchdog_nmi_start(void)
+void watchdog_hardlockup_start(void)
{
int cpu;
@@ -566,7 +566,7 @@ void watchdog_nmi_start(void)
/*
* Invoked from core watchdog init.
*/
-int __init watchdog_nmi_probe(void)
+int __init watchdog_hardlockup_probe(void)
{
int err;
@@ -582,7 +582,7 @@ int __init watchdog_nmi_probe(void)
}
#ifdef CONFIG_PPC_PSERIES
-void watchdog_nmi_set_timeout_pct(u64 pct)
+void watchdog_hardlockup_set_timeout_pct(u64 pct)
{
pr_info("Set the NMI watchdog timeout factor to %llu%%\n", pct);
WRITE_ONCE(wd_timeout_pct, pct);
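A hedged illustration of what the percentage means in practice; the consumer of wd_timeout_pct sits outside this excerpt, so the formula below is an assumption based on the pr_info() text and on the reset to 0 after migration:

    /* assumed: the factor extends the timeout by pct percent */
    u64 threshold = watchdog_thresh;                                /* e.g. 10 s         */
    threshold += (READ_ONCE(wd_timeout_pct) * threshold) / 100;     /* pct = 200 -> 30 s */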
@@ -750,7 +750,7 @@ static int pseries_migrate_partition(u64 handle)
goto out;
if (factor)
- watchdog_nmi_set_timeout_pct(factor);
+ watchdog_hardlockup_set_timeout_pct(factor);
ret = pseries_suspend(handle);
if (ret == 0) {
@@ -766,7 +766,7 @@ static int pseries_migrate_partition(u64 handle)
pseries_cancel_migration(handle, ret);
if (factor)
- watchdog_nmi_set_timeout_pct(0);
+ watchdog_hardlockup_set_timeout_pct(0);
out:
vas_migration_handler(VAS_RESUME);
@@ -282,7 +282,7 @@ __setup("nmi_watchdog=", setup_nmi_watchdog);
* sparc specific NMI watchdog enable function.
* Enables watchdog if it is not enabled already.
*/
-void watchdog_nmi_enable(unsigned int cpu)
+void watchdog_hardlockup_enable(unsigned int cpu)
{
if (atomic_read(&nmi_active) == -1) {
pr_warn("NMI watchdog cannot be enabled or disabled\n");
@@ -303,7 +303,7 @@ void watchdog_nmi_enable(unsigned int cpu)
* sparc specific NMI watchdog disable function.
* Disables watchdog if it is not disabled already.
*/
-void watchdog_nmi_disable(unsigned int cpu)
+void watchdog_hardlockup_disable(unsigned int cpu)
{
if (atomic_read(&nmi_active) == -1)
pr_warn_once("NMI watchdog cannot be enabled or disabled\n");
@@ -75,10 +75,10 @@ static inline void reset_hung_task_detector(void) { }
* handled differently because its value is not boolean, and the lockup
* detectors are 'suspended' while 'watchdog_thresh' is equal zero.
*/
-#define NMI_WATCHDOG_ENABLED_BIT 0
-#define SOFT_WATCHDOG_ENABLED_BIT 1
-#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
-#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+#define WATCHDOG_HARDLOCKUP_ENABLED_BIT 0
+#define WATCHDOG_SOFTOCKUP_ENABLED_BIT 1
+#define WATCHDOG_HARDLOCKUP_ENABLED (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
+#define WATCHDOG_SOFTOCKUP_ENABLED (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
@@ -120,11 +120,11 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
# endif
#endif
-void watchdog_nmi_stop(void);
-void watchdog_nmi_start(void);
-int watchdog_nmi_probe(void);
-void watchdog_nmi_enable(unsigned int cpu);
-void watchdog_nmi_disable(unsigned int cpu);
+void watchdog_hardlockup_stop(void);
+void watchdog_hardlockup_start(void);
+int watchdog_hardlockup_probe(void);
+void watchdog_hardlockup_enable(unsigned int cpu);
+void watchdog_hardlockup_disable(unsigned int cpu);
void lockup_detector_reconfigure(void);
@@ -30,17 +30,17 @@
static DEFINE_MUTEX(watchdog_mutex);
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
-# define NMI_WATCHDOG_DEFAULT 1
+# define WATCHDOG_HARDLOCKUP_DEFAULT 1
#else
-# define NMI_WATCHDOG_DEFAULT 0
+# define WATCHDOG_HARDLOCKUP_DEFAULT 0
#endif
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
-int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
-int __read_mostly soft_watchdog_user_enabled = 1;
+int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
+int __read_mostly watchdog_softlockup_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
-static int __read_mostly nmi_watchdog_available;
+static int __read_mostly watchdog_hardlockup_available;
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -66,7 +66,7 @@ unsigned int __read_mostly hardlockup_panic =
*/
void __init hardlockup_detector_disable(void)
{
- nmi_watchdog_user_enabled = 0;
+ watchdog_hardlockup_user_enabled = 0;
}
static int __init hardlockup_panic_setup(char *str)
@@ -76,9 +76,9 @@ static int __init hardlockup_panic_setup(char *str)
else if (!strncmp(str, "nopanic", 7))
hardlockup_panic = 0;
else if (!strncmp(str, "0", 1))
- nmi_watchdog_user_enabled = 0;
+ watchdog_hardlockup_user_enabled = 0;
else if (!strncmp(str, "1", 1))
- nmi_watchdog_user_enabled = 1;
+ watchdog_hardlockup_user_enabled = 1;
return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -190,40 +190,40 @@ static inline void watchdog_hardlockup_kick(void) { }
* These functions can be overridden if an architecture implements its
* own hardlockup detector.
*
- * watchdog_nmi_enable/disable can be implemented to start and stop when
+ * watchdog_hardlockup_enable/disable can be implemented to start and stop when
* softlockup watchdog start and stop. The arch must select the
* SOFTLOCKUP_DETECTOR Kconfig.
*/
-void __weak watchdog_nmi_enable(unsigned int cpu)
+void __weak watchdog_hardlockup_enable(unsigned int cpu)
{
hardlockup_detector_perf_enable();
}
-void __weak watchdog_nmi_disable(unsigned int cpu)
+void __weak watchdog_hardlockup_disable(unsigned int cpu)
{
hardlockup_detector_perf_disable();
}
-/* Return 0, if a NMI watchdog is available. Error code otherwise */
-int __weak __init watchdog_nmi_probe(void)
+/* Return 0 if a hardlockup detector is available, error code otherwise. */
+int __weak __init watchdog_hardlockup_probe(void)
{
return hardlockup_detector_perf_init();
}
/**
- * watchdog_nmi_stop - Stop the watchdog for reconfiguration
+ * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
*
* The reconfiguration steps are:
- * watchdog_nmi_stop();
+ * watchdog_hardlockup_stop();
* update_variables();
- * watchdog_nmi_start();
+ * watchdog_hardlockup_start();
*/
-void __weak watchdog_nmi_stop(void) { }
+void __weak watchdog_hardlockup_stop(void) { }
/**
- * watchdog_nmi_start - Start the watchdog after reconfiguration
+ * watchdog_hardlockup_start - Start the watchdog after reconfiguration
*
- * Counterpart to watchdog_nmi_stop().
+ * Counterpart to watchdog_hardlockup_stop().
*
* The following variables have been updated in update_variables() and
* contain the currently valid configuration:
@@ -231,23 +231,23 @@ void __weak watchdog_nmi_stop(void) { }
* - watchdog_thresh
* - watchdog_cpumask
*/
-void __weak watchdog_nmi_start(void) { }
+void __weak watchdog_hardlockup_start(void) { }
/**
* lockup_detector_update_enable - Update the sysctl enable bit
*
- * Caller needs to make sure that the NMI/perf watchdogs are off, so this
- * can't race with watchdog_nmi_disable().
+ * Caller needs to make sure that the hardlockup detectors are off, so this
+ * can't race with watchdog_hardlockup_disable().
*/
static void lockup_detector_update_enable(void)
{
watchdog_enabled = 0;
if (!watchdog_user_enabled)
return;
- if (nmi_watchdog_available && nmi_watchdog_user_enabled)
- watchdog_enabled |= NMI_WATCHDOG_ENABLED;
- if (soft_watchdog_user_enabled)
- watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
+ if (watchdog_hardlockup_available && watchdog_hardlockup_user_enabled)
+ watchdog_enabled |= WATCHDOG_HARDLOCKUP_ENABLED;
+ if (watchdog_softlockup_user_enabled)
+ watchdog_enabled |= WATCHDOG_SOFTOCKUP_ENABLED;
}
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
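To make the __weak override mechanism described in the hunk above concrete, here is a minimal sketch of an architecture-side strong definition (hypothetical arch code, not part of this patch; sparc's real implementations appear in an earlier hunk):

    /*
     * A strong definition in arch code silently replaces the __weak
     * perf-based default; the arch must also select the
     * SOFTLOCKUP_DETECTOR Kconfig so the softlockup path drives it.
     */
    void watchdog_hardlockup_enable(unsigned int cpu)
    {
            /* arm the arch-specific hardlockup mechanism on this CPU */
    }

    void watchdog_hardlockup_disable(unsigned int cpu)
    {
            /* disarm it again */
    }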
@@ -288,7 +288,7 @@ __setup("nowatchdog", nowatchdog_setup);
static int __init nosoftlockup_setup(char *str)
{
- soft_watchdog_user_enabled = 0;
+ watchdog_softlockup_user_enabled = 0;
return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
@@ -402,7 +402,7 @@ static int is_softlockup(unsigned long touch_ts,
unsigned long period_ts,
unsigned long now)
{
- if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
+ if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
/* Warn about unreasonable delays. */
if (time_after(now, period_ts + get_softlockup_thresh()))
return now - touch_ts;
@@ -537,7 +537,7 @@ static void watchdog_enable(unsigned int cpu)
complete(done);
/*
- * Start the timer first to prevent the NMI watchdog triggering
+ * Start the timer first to prevent the hardlockup detector from triggering
* before the timer has a chance to fire.
*/
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
@@ -547,9 +547,9 @@ static void watchdog_enable(unsigned int cpu)
/* Initialize timestamp */
update_touch_ts();
- /* Enable the perf event */
- if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
- watchdog_nmi_enable(cpu);
+ /* Enable the hardlockup detector */
+ if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
+ watchdog_hardlockup_enable(cpu);
}
static void watchdog_disable(unsigned int cpu)
@@ -559,11 +559,11 @@ static void watchdog_disable(unsigned int cpu)
WARN_ON_ONCE(cpu != smp_processor_id());
/*
- * Disable the perf event first. That prevents that a large delay
- * between disabling the timer and disabling the perf event causes
- * the perf NMI to detect a false positive.
+ * Disable the hardlockup detector first, so that a large delay
+ * between disabling the timer and disabling the hardlockup detector
+ * cannot cause a false positive.
*/
- watchdog_nmi_disable(cpu);
+ watchdog_hardlockup_disable(cpu);
hrtimer_cancel(hrtimer);
wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
@@ -619,7 +619,7 @@ int lockup_detector_offline_cpu(unsigned int cpu)
static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
- watchdog_nmi_stop();
+ watchdog_hardlockup_stop();
softlockup_stop_all();
set_sample_period();
@@ -627,7 +627,7 @@ static void __lockup_detector_reconfigure(void)
if (watchdog_enabled && watchdog_thresh)
softlockup_start_all();
- watchdog_nmi_start();
+ watchdog_hardlockup_start();
cpus_read_unlock();
/*
* Must be called outside the cpus locked section to prevent
@@ -668,9 +668,9 @@ static __init void lockup_detector_setup(void)
static void __lockup_detector_reconfigure(void)
{
cpus_read_lock();
- watchdog_nmi_stop();
+ watchdog_hardlockup_stop();
lockup_detector_update_enable();
- watchdog_nmi_start();
+ watchdog_hardlockup_start();
cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
@@ -725,14 +725,14 @@ static void proc_watchdog_update(void)
/*
* common function for watchdog, nmi_watchdog and soft_watchdog parameter
*
- * caller | table->data points to | 'which'
- * -------------------|----------------------------|--------------------------
- * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED |
- * | | SOFT_WATCHDOG_ENABLED
- * -------------------|----------------------------|--------------------------
- * proc_nmi_watchdog | nmi_watchdog_user_enabled | NMI_WATCHDOG_ENABLED
- * -------------------|----------------------------|--------------------------
- * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
+ * caller | table->data points to | 'which'
+ * -------------------|----------------------------------|-------------------------------
+ * proc_watchdog | watchdog_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED |
+ * | | WATCHDOG_SOFTOCKUP_ENABLED
+ * -------------------|----------------------------------|-------------------------------
+ * proc_nmi_watchdog | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
+ * -------------------|----------------------------------|-------------------------------
+ * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
*/
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
@@ -764,7 +764,8 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
int proc_watchdog(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
+ return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
+ WATCHDOG_SOFTOCKUP_ENABLED,
table, write, buffer, lenp, ppos);
}
@@ -774,9 +775,9 @@ int proc_watchdog(struct ctl_table *table, int write,
int proc_nmi_watchdog(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- if (!nmi_watchdog_available && write)
+ if (!watchdog_hardlockup_available && write)
return -ENOTSUPP;
- return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
+ return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED,
table, write, buffer, lenp, ppos);
}
@@ -786,7 +787,7 @@ int proc_nmi_watchdog(struct ctl_table *table, int write,
int proc_soft_watchdog(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
+ return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
table, write, buffer, lenp, ppos);
}
@@ -854,7 +855,7 @@ static struct ctl_table watchdog_sysctls[] = {
},
{
.procname = "nmi_watchdog",
- .data = &nmi_watchdog_user_enabled,
+ .data = &watchdog_hardlockup_user_enabled,
.maxlen = sizeof(int),
.mode = NMI_WATCHDOG_SYSCTL_PERM,
.proc_handler = proc_nmi_watchdog,
@@ -871,7 +872,7 @@ static struct ctl_table watchdog_sysctls[] = {
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
{
.procname = "soft_watchdog",
- .data = &soft_watchdog_user_enabled,
+ .data = &watchdog_softlockup_user_enabled,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_soft_watchdog,
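Because only the backing variables are renamed, the user-visible sysctl names are unchanged; a small userspace sketch (illustrative, not part of the patch) exercising the existing /proc/sys/kernel/nmi_watchdog knob:

    #include <stdio.h>

    int main(void)
    {
            /*
             * Needs root. The write is also rejected with ENOTSUPP when no
             * hardlockup detector is available, see proc_nmi_watchdog() above.
             */
            FILE *f = fopen("/proc/sys/kernel/nmi_watchdog", "w");

            if (!f)
                    return 1;
            fputs("0\n", f);        /* "0" disables, "1" enables the detector */
            return fclose(f) ? 1 : 0;
    }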
@@ -940,8 +941,8 @@ void __init lockup_detector_init(void)
cpumask_copy(&watchdog_cpumask,
housekeeping_cpumask(HK_TYPE_TIMER));
- if (!watchdog_nmi_probe())
- nmi_watchdog_available = true;
+ if (!watchdog_hardlockup_probe())
+ watchdog_hardlockup_available = true;
lockup_detector_setup();
watchdog_sysctl_init();
}
@@ -215,7 +215,7 @@ void __init hardlockup_detector_perf_restart(void)
lockdep_assert_cpus_held();
- if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ if (!(watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED))
return;
for_each_online_cpu(cpu) {