watchdog.c

/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif

static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_user_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);
        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        watchdog_user_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_softlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return local_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
        /*
         * convert watchdog_thresh from seconds to ns
         * the divide by 5 is to give hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
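
/*
 * With the default watchdog_thresh of 10, the soft-lockup threshold is
 * 20 s and sample_period works out to 4 s. The hardlockup perf event
 * below is programmed for roughly watchdog_thresh (10 s) worth of CPU
 * cycles, so two or three hrtimer ticks land before each NMI checks
 * whether hrtimer_interrupts has advanced.
 */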

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done lockless
         * do we care if a 0 races with a timestamp?
         * all it means is the softlockup check starts one cycle later
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        /*
         * Using __raw here because some code paths have
         * preemption enabled. If preemption is enabled
         * then interrupts should be enabled too, in which
         * case we shouldn't have to worry about the watchdog
         * going off.
         */
        __raw_get_cpu_var(watchdog_nmi_touch) = true;
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif
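
/*
 * Typical use: code that legitimately keeps a CPU busy for a long time
 * (long early-boot loops, crash dumps, slow console output) calls
 * touch_nmi_watchdog() or touch_softlockup_watchdog() periodically so
 * that the delay is not reported as a lockup.
 */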

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
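/*
 * Called from the NMI path via watchdog_overflow_callback(): if
 * hrtimer_interrupts has not advanced since the previous NMI, the timer
 * interrupt has been starved for a whole NMI period and the CPU is
 * considered hard locked.
 */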
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
#endif
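
/*
 * Returns the number of seconds since the per-cpu timestamp was last
 * touched once that delay exceeds the soft-lockup threshold, or 0 while
 * the timestamp is still recent enough.
 */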
static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
                return now - touch_ts;
        return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type = PERF_TYPE_HARDWARE,
        .config = PERF_COUNT_HW_CPU_CYCLES,
        .size = sizeof(struct perf_event_attr),
        .pinned = 1,
        .disabled = 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                                       struct perf_sample_data *data,
                                       struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /* check for a hardlockup
         * This is done by making sure our timer interrupt
         * is incrementing. The timer interrupt should have
         * fired multiple times before we overflowed. If it hasn't
         * then this is a good indication the cpu is stuck
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
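/*
 * Per-cpu hrtimer callback, run every sample_period: it bumps
 * hrtimer_interrupts (the hardlockup detector's proof of life), wakes
 * the SCHED_FIFO watchdog thread (the softlockup detector's proof of
 * life), and warns if watchdog_touch_ts has gone stale beyond the
 * soft-lockup threshold.
 */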
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled. The task touches the watchdog to
         * indicate it is getting cpu time. If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                if (softlockup_all_cpu_backtrace) {
                        /* Prevent multiple soft-lockup reports if one cpu is already
                         * engaged in dumping cpu back traces
                         */
                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
                                /* Someone else will report us. Let's give up */
                                __this_cpu_write(soft_watchdog_warn, true);
                                return HRTIMER_RESTART;
                        }
                }

                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        /* Avoid generating two back traces for current
                         * given that one is already made above
                         */
                        trigger_allbutself_cpu_backtrace();

                        clear_bit(0, &soft_lockup_nmi_warn);
                        /* Barrier to sync with other cpus */
                        smp_mb__after_atomic();
                }

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}
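
/*
 * smpboot runs the per-cpu thread function only while this returns true,
 * i.e. whenever the hrtimer has fired since the last time watchdog() ran
 * and copied hrtimer_interrupts into soft_lockup_hrtimer_cnt.
 */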
static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

        /* save cpu0 error for future comparison */
        if (cpu == 0 && IS_ERR(event))
                cpu0_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for cpu0 or if different from cpu0 */
                if (cpu == 0 || cpu0_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /* skip displaying the same error again */
        if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warning("disabled (cpu%i): hardware events not enabled\n",
                           cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                       cpu, PTR_ERR(event));
        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
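
/*
 * Hooks handed to the smpboot infrastructure, which creates one
 * "watchdog/%u" thread per CPU, calls setup/park/unpark as CPUs come
 * and go, and runs thread_fn whenever thread_should_run says so.
 */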
static struct smp_hotplug_thread watchdog_threads = {
        .store             = &softlockup_watchdog,
        .thread_should_run = watchdog_should_run,
        .thread_fn         = watchdog,
        .thread_comm       = "watchdog/%u",
        .setup             = watchdog_enable,
        .cleanup           = watchdog_cleanup,
        .park              = watchdog_disable,
        .unpark            = watchdog_enable,
};

static void restart_watchdog_hrtimer(void *info)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
        int ret;

        /*
         * No need to cancel and restart hrtimer if it is currently executing
         * because it will reprogram itself with the new period now.
         * We should never see it unqueued here because we are running per-cpu
         * with interrupts disabled.
         */
        ret = hrtimer_try_to_cancel(hrtimer);
        if (ret == 1)
                hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                              HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
        /*
         * Make sure that the perf event counter will adopt a new
         * sampling period. Updating the sampling period directly would
         * be much nicer but we do not have an API for that now so
         * let's use a big hammer.
         * Hrtimer will adopt the new period on the next tick but this
         * might be late already so we have to restart the timer as well.
         */
        watchdog_nmi_disable(cpu);
        smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
        watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                update_timers(cpu);
        put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread(&watchdog_threads);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else if (sample_period_changed) {
                update_timers_all_cpus();
        }

        return err;
}

/* prepare/enable/disable routines */

/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog, watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old_thresh, old_enabled;
        static DEFINE_MUTEX(watchdog_proc_mutex);

        mutex_lock(&watchdog_proc_mutex);
        old_thresh = ACCESS_ONCE(watchdog_thresh);
        old_enabled = ACCESS_ONCE(watchdog_user_enabled);

        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (err || !write)
                goto out;

        set_sample_period();
        /*
         * There is no need to start threads that are already running or
         * to stop threads that are already stopped; the 'watchdog_running'
         * check in the watchdog_*_all_cpus() functions takes care of this.
         */
        if (watchdog_user_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
        else
                watchdog_disable_all_cpus();

        /* Restore old values on failure */
        if (err) {
                watchdog_thresh = old_thresh;
                watchdog_user_enabled = old_enabled;
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        return err;
}
#endif /* CONFIG_SYSCTL */
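
/*
 * Boot-time entry point: compute the sample period and, unless the
 * watchdog was disabled on the command line, start the per-cpu
 * watchdog threads.
 */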
void __init lockup_detector_init(void)
{
        set_sample_period();

        if (watchdog_user_enabled)
                watchdog_enable_all_cpus(false);
}