@@ -29,15 +29,58 @@
 #include <linux/kvm_para.h>
 #include <linux/kthread.h>
 
+/* Watchdog configuration */
 static DEFINE_MUTEX(watchdog_proc_mutex);
 
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
-unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
+int __read_mostly nmi_watchdog_enabled;
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
+						NMI_WATCHDOG_ENABLED;
 #else
 unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
 #endif
-int __read_mostly nmi_watchdog_enabled;
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+/* boot commands */
+/*
+ * Should we panic when a soft-lockup or hard-lockup occurs:
+ */
+unsigned int __read_mostly hardlockup_panic =
+			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+/*
+ * We may not want to enable hard lockup detection by default in all cases,
+ * for example when running the kernel as a guest on a hypervisor. In these
+ * cases this function can be called to disable hard lockup detection. This
+ * function should only be executed once by the boot processor before the
+ * kernel command line parameters are parsed, because otherwise it is not
+ * possible to override this in hardlockup_panic_setup().
+ */
+void hardlockup_detector_disable(void)
+{
+	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+}
+
+static int __init hardlockup_panic_setup(char *str)
+{
+	if (!strncmp(str, "panic", 5))
+		hardlockup_panic = 1;
+	else if (!strncmp(str, "nopanic", 7))
+		hardlockup_panic = 0;
+	else if (!strncmp(str, "0", 1))
+		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+	else if (!strncmp(str, "1", 1))
+		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+	return 1;
+}
+__setup("nmi_watchdog=", hardlockup_panic_setup);
+
+#endif
+
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
 int __read_mostly soft_watchdog_enabled;
+#endif
+
 int __read_mostly watchdog_user_enabled;
 int __read_mostly watchdog_thresh = 10;
 
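For context, hardlockup_detector_disable() above has exactly one intended kind of caller: boot-CPU code that runs before the kernel command line is parsed, such as hypervisor guest setup. A minimal sketch of such a caller, modelled on what the x86 KVM guest setup does; the function name here is hypothetical:

/*
 * Hypothetical guest-init hook. Must run on the boot CPU before
 * parse_args() processes "nmi_watchdog=", so the command line can
 * still re-enable the detector via hardlockup_panic_setup().
 */
void __init example_guest_init(void)
{
	/* The PMU-backed NMI watchdog is unreliable under this hypervisor. */
	hardlockup_detector_disable();
}
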
@@ -45,15 +88,9 @@ int __read_mostly watchdog_thresh = 10;
 int __read_mostly sysctl_softlockup_all_cpu_backtrace;
 int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
 #endif
-static struct cpumask watchdog_cpumask __read_mostly;
+struct cpumask watchdog_cpumask __read_mostly;
 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 
-/* Helper for online, unparked cpus. */
-#define for_each_watchdog_cpu(cpu) \
-	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
-
-atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
-
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -72,7 +109,27 @@ static int __read_mostly watchdog_running;
  * of 'watchdog_running' cannot change while the watchdog is deactivated
  * temporarily (see related code in 'proc' handlers).
  */
-static int __read_mostly watchdog_suspended;
+int __read_mostly watchdog_suspended;
+
+/*
+ * These functions can be overridden if an architecture implements its
+ * own hardlockup detector.
+ */
+int __weak watchdog_nmi_enable(unsigned int cpu)
+{
+	return 0;
+}
+void __weak watchdog_nmi_disable(unsigned int cpu)
+{
+}
+
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
+
+/* Helper for online, unparked cpus. */
+#define for_each_watchdog_cpu(cpu) \
+	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
+
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
 
 static u64 __read_mostly sample_period;
 
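The two __weak stubs above are the override points for architectures that bring their own NMI watchdog (those selecting CONFIG_HAVE_NMI_WATCHDOG, such as sparc). A strong definition in arch code replaces the weak one at link time; a rough sketch, where the arch_nmi_timer_* helpers are assumed rather than real:

/* Arch-side strong definitions override the __weak stubs at link time. */
int watchdog_nmi_enable(unsigned int cpu)
{
	/* Arm this CPU's NMI source; arch_nmi_timer_start() is assumed. */
	return arch_nmi_timer_start(cpu);
}

void watchdog_nmi_disable(unsigned int cpu)
{
	/* arch_nmi_timer_stop() is likewise an assumed helper. */
	arch_nmi_timer_stop(cpu);
}
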
@@ -120,6 +177,7 @@ static int __init softlockup_all_cpu_backtrace_setup(char *str)
 	return 1;
 }
 __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
 static int __init hardlockup_all_cpu_backtrace_setup(char *str)
 {
 	sysctl_hardlockup_all_cpu_backtrace =
@@ -128,6 +186,7 @@ static int __init hardlockup_all_cpu_backtrace_setup(char *str)
 }
 __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
 #endif
+#endif
 
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
@@ -213,18 +272,6 @@ void touch_softlockup_watchdog_sync(void)
 	__this_cpu_write(watchdog_touch_ts, 0);
 }
 
-/* watchdog detector functions */
-bool is_hardlockup(void)
-{
-	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
-
-	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
-		return true;
-
-	__this_cpu_write(hrtimer_interrupts_saved, hrint);
-	return false;
-}
-
 static int is_softlockup(unsigned long touch_ts)
 {
 	unsigned long now = get_timestamp();
@@ -237,21 +284,21 @@ static int is_softlockup(unsigned long touch_ts)
 	return 0;
 }
 
-static void watchdog_interrupt_count(void)
+/* watchdog detector functions */
+bool is_hardlockup(void)
 {
-	__this_cpu_inc(hrtimer_interrupts);
-}
+	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-/*
- * These two functions are mostly architecture specific
- * defining them as weak here.
- */
-int __weak watchdog_nmi_enable(unsigned int cpu)
-{
-	return 0;
+	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
+		return true;
+
+	__this_cpu_write(hrtimer_interrupts_saved, hrint);
+	return false;
 }
-void __weak watchdog_nmi_disable(unsigned int cpu)
+
+static void watchdog_interrupt_count(void)
 {
+	__this_cpu_inc(hrtimer_interrupts);
 }
 
 static int watchdog_enable_all_cpus(void);
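The reordering above keeps the detection handshake intact: the softlockup hrtimer calls watchdog_interrupt_count() several times per sample period, and the NMI path calls is_hardlockup() to confirm the counter is still moving. A simplified sketch of the NMI-side check; the real consumer is the perf-event overflow callback, and the signature here is reduced for illustration:

/* Runs in NMI context on each watchdog perf-event overflow. */
static void example_overflow_check(void)
{
	/*
	 * If hrtimer_interrupts has not advanced since the previous NMI,
	 * interrupts have been blocked for a whole sample period on this
	 * CPU, so report a hard lockup.
	 */
	if (is_hardlockup())
		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
			 smp_processor_id());
}
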
@@ -502,57 +549,6 @@ static void watchdog_unpark_threads(void)
 		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
 }
 
-/*
- * Suspend the hard and soft lockup detector by parking the watchdog threads.
- */
-int lockup_detector_suspend(void)
-{
-	int ret = 0;
-
-	get_online_cpus();
-	mutex_lock(&watchdog_proc_mutex);
-	/*
-	 * Multiple suspend requests can be active in parallel (counted by
-	 * the 'watchdog_suspended' variable). If the watchdog threads are
-	 * running, the first caller takes care that they will be parked.
-	 * The state of 'watchdog_running' cannot change while a suspend
-	 * request is active (see related code in 'proc' handlers).
-	 */
-	if (watchdog_running && !watchdog_suspended)
-		ret = watchdog_park_threads();
-
-	if (ret == 0)
-		watchdog_suspended++;
-	else {
-		watchdog_disable_all_cpus();
-		pr_err("Failed to suspend lockup detectors, disabled\n");
-		watchdog_enabled = 0;
-	}
-
-	mutex_unlock(&watchdog_proc_mutex);
-
-	return ret;
-}
-
-/*
- * Resume the hard and soft lockup detector by unparking the watchdog threads.
- */
-void lockup_detector_resume(void)
-{
-	mutex_lock(&watchdog_proc_mutex);
-
-	watchdog_suspended--;
-	/*
-	 * The watchdog threads are unparked if they were previously running
-	 * and if there is no more active suspend request.
-	 */
-	if (watchdog_running && !watchdog_suspended)
-		watchdog_unpark_threads();
-
-	mutex_unlock(&watchdog_proc_mutex);
-	put_online_cpus();
-}
-
 static int update_watchdog_all_cpus(void)
 {
 	int ret;
@@ -604,6 +600,81 @@ static void watchdog_disable_all_cpus(void)
 	}
 }
 
+#else /* SOFTLOCKUP */
+static int watchdog_park_threads(void)
+{
+	return 0;
+}
+
+static void watchdog_unpark_threads(void)
+{
+}
+
+static int watchdog_enable_all_cpus(void)
+{
+	return 0;
+}
+
+static void watchdog_disable_all_cpus(void)
+{
+}
+
+static void set_sample_period(void)
+{
+}
+#endif /* SOFTLOCKUP */
+
+/*
+ * Suspend the hard and soft lockup detector by parking the watchdog threads.
+ */
+int lockup_detector_suspend(void)
+{
+	int ret = 0;
+
+	get_online_cpus();
+	mutex_lock(&watchdog_proc_mutex);
+	/*
+	 * Multiple suspend requests can be active in parallel (counted by
+	 * the 'watchdog_suspended' variable). If the watchdog threads are
+	 * running, the first caller takes care that they will be parked.
+	 * The state of 'watchdog_running' cannot change while a suspend
+	 * request is active (see related code in 'proc' handlers).
+	 */
+	if (watchdog_running && !watchdog_suspended)
+		ret = watchdog_park_threads();
+
+	if (ret == 0)
+		watchdog_suspended++;
+	else {
+		watchdog_disable_all_cpus();
+		pr_err("Failed to suspend lockup detectors, disabled\n");
+		watchdog_enabled = 0;
+	}
+
+	mutex_unlock(&watchdog_proc_mutex);
+
+	return ret;
+}
+
+/*
+ * Resume the hard and soft lockup detector by unparking the watchdog threads.
+ */
+void lockup_detector_resume(void)
+{
+	mutex_lock(&watchdog_proc_mutex);
+
+	watchdog_suspended--;
+	/*
+	 * The watchdog threads are unparked if they were previously running
+	 * and if there is no more active suspend request.
+	 */
+	if (watchdog_running && !watchdog_suspended)
+		watchdog_unpark_threads();
+
+	mutex_unlock(&watchdog_proc_mutex);
+	put_online_cpus();
+}
+
 #ifdef CONFIG_SYSCTL
 
 /*
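Moving lockup_detector_suspend()/lockup_detector_resume() below the new #endif keeps that API available whether or not CONFIG_SOFTLOCKUP_DETECTOR is set; with it disabled, the park/unpark stubs above make both calls cheap no-ops. Callers pair them around work that legitimately stalls CPUs. A sketch of the usual pattern, with do_cpu_stalling_work() as a placeholder and error handling reduced to the minimum:

/* Typical call pattern; do_cpu_stalling_work() is a placeholder. */
static int example_stall_section(void)
{
	int ret = lockup_detector_suspend();

	if (ret)
		return ret;	/* detectors were force-disabled on failure */

	do_cpu_stalling_work();	/* may keep CPUs busy past the thresholds */

	lockup_detector_resume();
	return 0;
}
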
@@ -810,9 +881,11 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
 			 * a temporary cpumask, so we are likely not in a
 			 * position to do much else to make things better.
 			 */
+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
 			if (smpboot_update_cpumask_percpu_thread(
 				    &watchdog_threads, &watchdog_cpumask) != 0)
 				pr_err("cpumask update failed\n");
+#endif
 		}
 	}
 out: