/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>
/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
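/*
 * For example, with both detectors active 'watchdog_enabled' holds 0x3:
 *
 *	NMI_WATCHDOG_ENABLED  = 1 << 0 = 0x1
 *	SOFT_WATCHDOG_ENABLED = 1 << 1 = 0x2
 *	0x1 | 0x2             = 0x3
 *
 * Clearing one bit (e.g. 'watchdog_enabled &= ~NMI_WATCHDOG_ENABLED')
 * leaves the other detector untouched.
 */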
static DEFINE_MUTEX(watchdog_proc_mutex);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;

/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;
/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
unsigned int __read_mostly hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
static unsigned long hardlockup_allcpu_dumped;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
        watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
        else if (!strncmp(str, "1", 1))
                watchdog_enabled |= NMI_WATCHDOG_ENABLED;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);
        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
        watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_softlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_hardlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. Hence we
 * couple the two thresholds with a fixed factor: the softlockup threshold
 * is twice the hardlockup threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
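/*
 * Worked example: running_clock() returns nanoseconds, so a reading of
 * 10 * 2^30 ns (~10.7 real seconds) shifts down to a timestamp of 10.
 * The ~7% error versus a true divide by 10^9 is harmless here, since
 * the thresholds are coarse multi-second values.
 */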
static void set_sample_period(void)
{
        /*
         * convert watchdog_thresh from seconds to ns
         * the divide by 5 is to give hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
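/*
 * Worked example: with the default watchdog_thresh of 10,
 * get_softlockup_thresh() returns 20 and
 *
 *	sample_period = 20 * (NSEC_PER_SEC / 5) = 4 * NSEC_PER_SEC
 *
 * so the hrtimer fires every 4 seconds - two to three chances to
 * increment 'hrtimer_interrupts' within the 10 second hard threshold.
 */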
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done lockless
         * do we care if a 0 races with a timestamp?
         * all it means is the softlockup check starts one cycle later
         */
        for_each_watchdog_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        /*
         * Using __raw here because some code paths have
         * preemption enabled. If preemption is enabled
         * then interrupts should be enabled too, in which
         * case we shouldn't have to worry about the watchdog
         * going off.
         */
        raw_cpu_write(watchdog_nmi_touch, true);
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif

void touch_softlockup_watchdog_sync(void)
{
        __this_cpu_write(softlockup_touch_sync, true);
        __this_cpu_write(watchdog_touch_ts, 0);
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static bool is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return true;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return false;
}
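/*
 * Example sequence: the perf NMI fires roughly once per hardlockup
 * threshold interval, so with interrupts working it might observe
 *
 *	NMI #1: hrtimer_interrupts == 42, saved == 40 -> false, save 42
 *	NMI #2: hrtimer_interrupts == 42, saved == 42 -> true (lockup)
 *
 * i.e. a hard lockup is flagged when the hrtimer made no progress
 * between two consecutive NMIs on this CPU.
 */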
#endif

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
                /* Warn about unreasonable delays. */
                if (time_after(now, touch_ts + get_softlockup_thresh()))
                        return now - touch_ts;
        }
        return 0;
}
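/*
 * Example: with watchdog_thresh = 10 the softlockup threshold is 20s.
 * If the watchdog task last touched the timestamp at 100 and 'now' is
 * 121, time_after(121, 100 + 20) is true and a 21 second stall is
 * reported back to watchdog_timer_fn().
 */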
#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                struct perf_sample_data *data,
                struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /*
         * Check for a hardlockup by making sure our timer interrupt is
         * incrementing. The timer interrupt should have fired multiple
         * times before we overflowed. If it hasn't, then this is a good
         * indication the cpu is stuck.
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();
                struct pt_regs *regs = get_irq_regs();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", this_cpu);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                /*
                 * Perform all-CPU dump only once to avoid multiple
                 * hardlockups generating interleaving traces
                 */
                if (sysctl_hardlockup_all_cpu_backtrace &&
                                !test_and_set_bit(0, &hardlockup_allcpu_dumped))
                        trigger_allbutself_cpu_backtrace();

                if (hardlockup_panic)
                        panic("Hard LOCKUP");

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /*
         * Check for a softlockup by making sure a high priority task is
         * being scheduled. The task touches the watchdog to indicate it
         * is getting cpu time. If it hasn't, then this is a good
         * indication some task is hogging the cpu.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true) {
                        /*
                         * When multiple processes are causing softlockups the
                         * softlockup detector only warns on the first one
                         * because the code relies on a full quiet cycle to
                         * re-arm. The second process prevents the quiet cycle
                         * and never gets reported. Use task pointers to detect
                         * this.
                         */
                        if (__this_cpu_read(softlockup_task_ptr_saved) !=
                            current) {
                                __this_cpu_write(soft_watchdog_warn, false);
                                __touch_watchdog();
                        }
                        return HRTIMER_RESTART;
                }

                if (softlockup_all_cpu_backtrace) {
                        /*
                         * Prevent multiple soft-lockup reports if one cpu is
                         * already engaged in dumping cpu back traces.
                         */
                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
                                /* Someone else will report us. Let's give up */
                                __this_cpu_write(soft_watchdog_warn, true);
                                return HRTIMER_RESTART;
                        }
                }

                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                __this_cpu_write(softlockup_task_ptr_saved, current);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        /*
                         * Avoid generating two back traces for current
                         * given that one is already made above.
                         */
                        trigger_allbutself_cpu_backtrace();

                        clear_bit(0, &soft_lockup_nmi_warn);
                        /* Barrier to sync with other cpus */
                        smp_mb__after_atomic();
                }

                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample period (4 seconds by default) to
 * reset the softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();

        /*
         * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
         * failure path. Check for failures that can occur asynchronously -
         * for example, when CPUs are on-lined - and shut down the hardware
         * perf event on each CPU accordingly.
         *
         * The only non-obvious place this bit can be cleared is through
         * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
         * pr_info here would be too noisy as it would result in a message
         * every few seconds if the hardlockup was disabled but the softlockup
         * enabled.
         */
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                watchdog_nmi_disable(cpu);
}
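/*
 * Putting the pieces together: every sample period the pinned hrtimer
 * bumps 'hrtimer_interrupts' and wakes this thread. The two counters
 * then differ, watchdog_should_run() returns true, and watchdog() above
 * re-syncs 'soft_lockup_hrtimer_cnt' and touches the timestamp. If the
 * thread is starved by a higher-priority cpu hog, the timestamp goes
 * stale and watchdog_timer_fn() raises the softlockup warning instead.
 */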
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* nothing to do if the hard lockup detector is disabled */
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                goto out;

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

        /* save cpu0 error for future comparison */
        if (cpu == 0 && IS_ERR(event))
                cpu0_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for cpu0, or for other cpus if cpu0 had an error */
                if (cpu == 0 || cpu0_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /*
         * Disable the hard lockup detector if _any_ CPU fails to set up
         * the hardware perf event. The watchdog() function checks the
         * NMI_WATCHDOG_ENABLED bit periodically.
         *
         * The barriers are for syncing up watchdog_enabled across all the
         * cpus, as clear_bit() does not use barriers.
         */
        smp_mb__before_atomic();
        clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
        smp_mb__after_atomic();

        /* skip displaying the same error again */
        if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warn("disabled (cpu%i): hardware events not enabled\n",
                        cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                        cpu, PTR_ERR(event));

        pr_info("Shutting down hard lockup detector on all cpus\n");

        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}
static void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        if (cpu == 0) {
                /* watchdog_nmi_enable() expects this to be zero initially. */
                cpu0_err = 0;
        }
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
        .thread_comm            = "watchdog/%u",
        .setup                  = watchdog_enable,
        .cleanup                = watchdog_cleanup,
        .park                   = watchdog_disable,
        .unpark                 = watchdog_enable,
};
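/*
 * Thread lifecycle: smpboot invokes .setup when a thread is first
 * created on a CPU and .unpark when it returns from a parked state, so
 * watchdog_enable() runs in both cases; .park and .cleanup mirror that
 * with watchdog_disable(). A CPU is therefore only monitored between
 * an enable and the matching disable.
 */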
/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
        int cpu, ret = 0;

        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }

        return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
        int cpu;

        for_each_watchdog_cpu(cpu)
                kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}
/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
        int ret = 0;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);
        /*
         * Multiple suspend requests can be active in parallel (counted by
         * the 'watchdog_suspended' variable). If the watchdog threads are
         * running, the first caller takes care that they will be parked.
         * The state of 'watchdog_running' cannot change while a suspend
         * request is active (see related code in 'proc' handlers).
         */
        if (watchdog_running && !watchdog_suspended)
                ret = watchdog_park_threads();

        if (ret == 0)
                watchdog_suspended++;
        else {
                watchdog_disable_all_cpus();
                pr_err("Failed to suspend lockup detectors, disabled\n");
                watchdog_enabled = 0;
        }

        mutex_unlock(&watchdog_proc_mutex);

        return ret;
}

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
        mutex_lock(&watchdog_proc_mutex);

        watchdog_suspended--;
        /*
         * The watchdog threads are unparked if they were previously running
         * and if there is no more active suspend request.
         */
        if (watchdog_running && !watchdog_suspended)
                watchdog_unpark_threads();

        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
}
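/*
 * Usage sketch (hypothetical caller): a subsystem that must quiesce
 * both detectors around a long-running critical section could pair the
 * calls like this:
 *
 *	if (!lockup_detector_suspend()) {
 *		...		(* work that would trip the watchdog *)
 *		lockup_detector_resume();
 *	}
 *
 * Nesting is safe because 'watchdog_suspended' is a counter: only the
 * first suspend parks the threads and only the last resume unparks them.
 */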
static int update_watchdog_all_cpus(void)
{
        int ret;

        ret = watchdog_park_threads();
        if (ret)
                return ret;

        watchdog_unpark_threads();

        return 0;
}

static int watchdog_enable_all_cpus(void)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
                                                             &watchdog_cpumask);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else {
                /*
                 * Enable/disable the lockup detectors or
                 * change the sample period 'on the fly'.
                 */
                err = update_watchdog_all_cpus();

                if (err) {
                        watchdog_disable_all_cpus();
                        pr_err("Failed to update lockup detectors, disabled\n");
                }
        }

        if (err)
                watchdog_enabled = 0;

        return err;
}

static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}
#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
        int err = 0;

        /*
         * Watchdog threads won't be started if they are already active.
         * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
         * care of this. If those threads are already active, the sample
         * period will be updated and the lockup detectors will be enabled
         * or disabled 'on the fly'.
         */
        if (watchdog_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

        return err;
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
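/*
 * Example walk-through: writing "1" to /proc/sys/kernel/nmi_watchdog
 * reaches proc_watchdog_common() below with which == NMI_WATCHDOG_ENABLED;
 * the cmpxchg loop then sets bit 0 of 'watchdog_enabled' without
 * disturbing the softlockup bit, and proc_watchdog_update() applies the
 * new state to the running threads.
 */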
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, new;
        int *watchdog_param = (int *)table->data;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        /*
         * If the parameter is being read return the state of the corresponding
         * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
         * run state of the lockup detectors.
         */
        if (!write) {
                *watchdog_param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
                if (err)
                        goto out;

                /*
                 * There is a race window between fetching the current value
                 * from 'watchdog_enabled' and storing the new value. During
                 * this race window, watchdog_nmi_enable() can sneak in and
                 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
                 * The 'cmpxchg' detects this race and the loop retries.
                 */
                do {
                        old = watchdog_enabled;
                        /*
                         * If the parameter value is not zero set the
                         * corresponding bit(s), else clear it(them).
                         */
                        if (*watchdog_param)
                                new = old | which;
                        else
                                new = old & ~which;
                } while (cmpxchg(&watchdog_enabled, old, new) != old);

                /*
                 * Update the run state of the lockup detectors. There is _no_
                 * need to check the value returned by proc_watchdog_update()
                 * and to restore the previous value of 'watchdog_enabled' as
                 * both lockup detectors are disabled if proc_watchdog_update()
                 * returns an error.
                 */
                err = proc_watchdog_update();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        old = ACCESS_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (err || !write)
                goto out;

        /*
         * Update the sample period. Restore 'watchdog_thresh' on failure.
         */
        set_sample_period();
        err = proc_watchdog_update();
        if (err) {
                watchdog_thresh = old;
                set_sample_period();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
        if (!err && write) {
                /* Remove impossible cpus to keep sysctl output cleaner. */
                cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
                            cpu_possible_mask);

                if (watchdog_running) {
                        /*
                         * Failure would be due to being unable to allocate
                         * a temporary cpumask, so we are likely not in a
                         * position to do much else to make things better.
                         */
                        if (smpboot_update_cpumask_percpu_thread(
                                    &watchdog_threads, &watchdog_cpumask) != 0)
                                pr_err("cpumask update failed\n");
                }
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
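/*
 * Example: writing "0-3" to /proc/sys/kernel/watchdog_cpumask limits the
 * watchdog to CPUs 0-3. Offline CPUs may stay in the mask; they are
 * simply picked up by for_each_watchdog_cpu() once they come online.
 */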
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
        set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_enabled()) {
                pr_info("Disabling watchdog on nohz_full cores by default\n");
                cpumask_copy(&watchdog_cpumask, housekeeping_mask);
        } else
                cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
        cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

        if (watchdog_enabled)
                watchdog_enable_all_cpus();
}