watchdog.c

/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif

static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_user_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);
        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */
#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_softlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif
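
/*
 * Illustrative kernel command-line usage of the handlers above:
 *
 *   nmi_watchdog=panic              - panic when a hard lockup is detected
 *   nmi_watchdog=0                  - disable the watchdog
 *   softlockup_panic=1              - panic when a soft lockup is detected
 *   nowatchdog                      - disable the watchdog entirely
 *   softlockup_all_cpu_backtrace=1  - dump all CPUs on a soft lockup
 */
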
/*
 * Hard-lockup warnings should be triggered after just a few seconds.
 * Soft-lockups can have false positives under extreme conditions, so we
 * generally want a higher threshold for soft lockups than for hard
 * lockups. We therefore couple the two thresholds with a fixed factor:
 * the soft-lockup threshold is twice the hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return local_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
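
/*
 * Example of the shift above: local_clock() returning 8,000,000,000 ns
 * gives 8e9 >> 30 == 7, i.e. about 7 "seconds" where each unit is
 * really 2^30 ns (~1.074s). That coarse resolution is all the
 * comparisons below need.
 */
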
static void set_sample_period(void)
{
        /*
         * Convert watchdog_thresh from seconds to ns.
         * The divide by 5 gives the hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning.
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
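
/*
 * Worked example with the defaults above (watchdog_thresh = 10):
 * get_softlockup_thresh() is 20s, so sample_period becomes
 * 20 * (10^9 / 5) ns = 4 * 10^9 ns = 4s. The hardlockup window is
 * roughly watchdog_thresh (10s), so the NMI handler normally sees
 * two or three hrtimer increments per period.
 */
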
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * This is done locklessly. Do we care if a 0 races with a
         * timestamp? All it means is that the softlockup check starts
         * one cycle later.
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}
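
/*
 * Typical (illustrative) use of the touch helpers: code that
 * legitimately keeps a CPU busy for a long stretch, e.g. while
 * polling hardware in a tight loop, is expected to call
 * touch_nmi_watchdog() or touch_softlockup_watchdog() periodically
 * so that the stall is not reported as a lockup.
 */
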
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        /*
         * Using __raw here because some code paths have
         * preemption enabled. If preemption is enabled
         * then interrupts should be enabled too, in which
         * case we shouldn't have to worry about the watchdog
         * going off.
         */
        __raw_get_cpu_var(watchdog_nmi_touch) = true;
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
                return now - touch_ts;

        return 0;
}
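
/*
 * Summary of the two checks: is_hardlockup() reports a lockup when
 * hrtimer_interrupts has not advanced between two consecutive perf
 * NMIs, i.e. the timer interrupt was blocked for a whole NMI period.
 * is_softlockup() returns the stall duration once the touch timestamp
 * is more than get_softlockup_thresh() seconds in the past.
 */
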
#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /*
         * Check for a hardlockup. This is done by making sure our timer
         * interrupt is incrementing. The timer interrupt should have
         * fired multiple times before we overflowed. If it hasn't,
         * then this is a good indication the cpu is stuck.
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d",
                              this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
                             this_cpu);

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
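
/*
 * Note on the callback above: the perf event is programmed via
 * hw_nmi_get_sample_period() so that it overflows roughly once every
 * watchdog_thresh seconds worth of CPU cycles and delivers an NMI.
 * Since the NMI fires even with normal interrupts disabled, finding
 * hrtimer_interrupts unchanged across two overflows means interrupts
 * have been off (or the CPU otherwise stuck) for the whole period.
 */
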
static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /*
         * Check for a softlockup. This is done by making sure a
         * high-priority task is being scheduled. The task touches
         * the watchdog to indicate it is getting cpu time. If it
         * hasn't, then this is a good indication some task is
         * hogging the cpu.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup. Check to see if the host
                 * stopped the vm before we issue the warning.
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                if (softlockup_all_cpu_backtrace) {
                        /*
                         * Prevent multiple soft-lockup reports if one cpu is
                         * already engaged in dumping cpu back traces.
                         */
                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
                                /* Someone else will report us. Let's give up */
                                __this_cpu_write(soft_watchdog_warn, true);
                                return HRTIMER_RESTART;
                        }
                }

                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        /*
                         * Avoid generating two back traces for current
                         * given that one is already made above.
                         */
                        trigger_allbutself_cpu_backtrace();

                        clear_bit(0, &soft_lockup_nmi_warn);
                        /* Barrier to sync with other cpus */
                        smp_mb__after_atomic();
                }

                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}
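
/*
 * Reporting details in watchdog_timer_fn(): the per-cpu
 * soft_watchdog_warn flag keeps each CPU from reporting the same
 * stall more than once, and bit 0 of soft_lockup_nmi_warn serializes
 * the optional all-CPU backtrace so that only one CPU at a time dumps
 * the others.
 */
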
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by default) to
 * reset the softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
}
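
/*
 * Putting the softlockup pieces together: every sample_period the
 * hrtimer callback wakes this per-cpu thread, which runs at
 * SCHED_FIFO/MAX_RT_PRIO-1 and refreshes watchdog_touch_ts via
 * __touch_watchdog(). If the timestamp nevertheless ages past the
 * soft threshold, the high-priority thread is not getting CPU time
 * and watchdog_timer_fn() emits the warning.
 */
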
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

        /* save cpu0 error for future comparison */
        if (cpu == 0 && IS_ERR(event))
                cpu0_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for cpu0, or if the result differs from cpu0's */
                if (cpu == 0 || cpu0_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /* skip displaying the same error again */
        if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warn("disabled (cpu%i): hardware events not enabled\n",
                        cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                       cpu, PTR_ERR(event));
        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
        .thread_comm            = "watchdog/%u",
        .setup                  = watchdog_enable,
        .cleanup                = watchdog_cleanup,
        .park                   = watchdog_disable,
        .unpark                 = watchdog_enable,
};
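
/*
 * The smpboot infrastructure creates one "watchdog/%u" thread per CPU
 * from this descriptor: .setup and .unpark arm the hrtimer and perf
 * event when a CPU comes online or is unparked, .park and .cleanup
 * tear them down again, and .thread_fn only runs when
 * .thread_should_run observes a new hrtimer tick to acknowledge.
 */
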
static void restart_watchdog_hrtimer(void *info)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
        int ret;

        /*
         * No need to cancel and restart hrtimer if it is currently executing
         * because it will reprogram itself with the new period now.
         * We should never see it unqueued here because we are running per-cpu
         * with interrupts disabled.
         */
        ret = hrtimer_try_to_cancel(hrtimer);
        if (ret == 1)
                hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                              HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
        /*
         * Make sure that the perf event counter will adapt to the new
         * sampling period. Updating the sampling period directly would
         * be much nicer, but we do not have an API for that now, so
         * let's use a big hammer.
         * The hrtimer will pick up the new period on the next tick, but
         * that might already be too late, so we have to restart the
         * timer as well.
         */
        watchdog_nmi_disable(cpu);
        smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
        watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                update_timers(cpu);
        put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread(&watchdog_threads);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else if (sample_period_changed) {
                update_timers_all_cpus();
        }

        return err;
}

/* prepare/enable/disable routines */

/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old_thresh, old_enabled;
        static DEFINE_MUTEX(watchdog_proc_mutex);

        mutex_lock(&watchdog_proc_mutex);
        old_thresh = ACCESS_ONCE(watchdog_thresh);
        old_enabled = ACCESS_ONCE(watchdog_user_enabled);

        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (err || !write)
                goto out;

        set_sample_period();
        /*
         * Watchdog threads shouldn't be started if they are already
         * running, and shouldn't be stopped if they aren't. The
         * 'watchdog_running' check in watchdog_*_all_cpus() takes
         * care of this.
         */
        if (watchdog_user_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
        else
                watchdog_disable_all_cpus();

        /* Restore old values on failure */
        if (err) {
                watchdog_thresh = old_thresh;
                watchdog_user_enabled = old_enabled;
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        return err;
}
#endif /* CONFIG_SYSCTL */
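
/*
 * Illustrative runtime usage of the proc handler above, e.g.:
 *
 *   echo 0  > /proc/sys/kernel/nmi_watchdog    # disable the watchdog
 *   echo 20 > /proc/sys/kernel/watchdog_thresh # hard threshold 20s, soft 40s
 *
 * Both writes end up in proc_dowatchdog(), which recomputes the sample
 * period and starts, stops or retunes the per-cpu watchdog threads.
 */
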
void __init lockup_detector_init(void)
{
        set_sample_period();

        if (watchdog_user_enabled)
                watchdog_enable_all_cpus(false);
}