@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 #define for_each_watchdog_cpu(cpu) \
 	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	int duration;
 	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+	if (atomic_read(&watchdog_park_in_progress) != 0)
+		return HRTIMER_NORESTART;
+
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
 
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
 {
 	int cpu, ret = 0;
 
+	atomic_set(&watchdog_park_in_progress, 1);
+
 	for_each_watchdog_cpu(cpu) {
 		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
 		if (ret)
 			break;
 	}
 
+	atomic_set(&watchdog_park_in_progress, 0);
+
 	return ret;
 }
 
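The change follows a simple guard pattern: watchdog_park_threads() sets
watchdog_park_in_progress before parking the per-CPU threads,
watchdog_timer_fn() returns HRTIMER_NORESTART while the flag is set, and
the flag is cleared once parking completes, presumably so the timer
callback cannot report a stale lockup while the threads are mid-park.
Below is a minimal user-space sketch of that pattern using C11 atomics;
every identifier in it is an illustrative stand-in, not a kernel symbol.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for watchdog_park_in_progress. */
static atomic_int park_in_progress;

/* Stand-in for watchdog_timer_fn(): skip the check while parking runs. */
static bool timer_tick(void)
{
	if (atomic_load(&park_in_progress) != 0)
		return false;	/* analogous to returning HRTIMER_NORESTART */
	/* ... the real callback would kick the hardlockup detector here ... */
	return true;
}

/* Stand-in for watchdog_park_threads(): bracket the slow work with the flag. */
static void park_threads(void)
{
	atomic_store(&park_in_progress, 1);
	/* ... the kthread_park() loop would run here and may take a while ... */
	atomic_store(&park_in_progress, 0);
}

int main(void)
{
	atomic_store(&park_in_progress, 1);
	printf("tick while parking: %d\n", timer_tick());	/* prints 0 */
	atomic_store(&park_in_progress, 0);
	park_threads();
	printf("tick after parking: %d\n", timer_tick());	/* prints 1 */
	return 0;
}

A plain flag with atomic_read()/atomic_set(), rather than a lock, is
enough here because the timer callback only needs a best-effort "am I
racing with parking?" answer and must never block.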