/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>
/* Architectures with a native NMI watchdog supply their own <asm/nmi.h>. */
#if defined(CONFIG_HAVE_NMI_WATCHDOG)
#include <asm/nmi.h>
#endif
  11. #ifdef CONFIG_LOCKUP_DETECTOR
  12. void lockup_detector_init(void);
  13. #else
  14. static inline void lockup_detector_init(void)
  15. {
  16. }
  17. #endif
  18. #ifdef CONFIG_SOFTLOCKUP_DETECTOR
  19. extern void touch_softlockup_watchdog_sched(void);
  20. extern void touch_softlockup_watchdog(void);
  21. extern void touch_softlockup_watchdog_sync(void);
  22. extern void touch_all_softlockup_watchdogs(void);
  23. extern unsigned int softlockup_panic;
  24. extern int soft_watchdog_enabled;
  25. extern atomic_t watchdog_park_in_progress;
  26. #else
  27. static inline void touch_softlockup_watchdog_sched(void)
  28. {
  29. }
  30. static inline void touch_softlockup_watchdog(void)
  31. {
  32. }
  33. static inline void touch_softlockup_watchdog_sync(void)
  34. {
  35. }
  36. static inline void touch_all_softlockup_watchdogs(void)
  37. {
  38. }
  39. #endif
  40. #ifdef CONFIG_DETECT_HUNG_TASK
  41. void reset_hung_task_detector(void);
  42. #else
  43. static inline void reset_hung_task_detector(void)
  44. {
  45. }
  46. #endif
/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * equals zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT 0
#define SOFT_WATCHDOG_ENABLED_BIT 1
#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
  63. #if defined(CONFIG_HARDLOCKUP_DETECTOR)
  64. extern void hardlockup_detector_disable(void);
  65. extern unsigned int hardlockup_panic;
  66. #else
  67. static inline void hardlockup_detector_disable(void) {}
  68. #endif
  69. #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
  70. extern void arch_touch_nmi_watchdog(void);
  71. #else
  72. #if !defined(CONFIG_HAVE_NMI_WATCHDOG)
  73. static inline void arch_touch_nmi_watchdog(void) {}
  74. #endif
  75. #endif
/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 *
 * Also pets the soft-lockup watchdog, since long interrupt-off regions
 * starve the softlockup threads as well.
 */
static inline void touch_nmi_watchdog(void)
{
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}
  88. /*
  89. * Create trigger_all_cpu_backtrace() out of the arch-provided
  90. * base function. Return whether such support was available,
  91. * to allow calling code to fall back to some other mechanism:
  92. */
  93. #ifdef arch_trigger_cpumask_backtrace
  94. static inline bool trigger_all_cpu_backtrace(void)
  95. {
  96. arch_trigger_cpumask_backtrace(cpu_online_mask, false);
  97. return true;
  98. }
  99. static inline bool trigger_allbutself_cpu_backtrace(void)
  100. {
  101. arch_trigger_cpumask_backtrace(cpu_online_mask, true);
  102. return true;
  103. }
  104. static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
  105. {
  106. arch_trigger_cpumask_backtrace(mask, false);
  107. return true;
  108. }
  109. static inline bool trigger_single_cpu_backtrace(int cpu)
  110. {
  111. arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
  112. return true;
  113. }
  114. /* generic implementation */
  115. void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
  116. bool exclude_self,
  117. void (*raise)(cpumask_t *mask));
  118. bool nmi_cpu_backtrace(struct pt_regs *regs);
  119. #else
  120. static inline bool trigger_all_cpu_backtrace(void)
  121. {
  122. return false;
  123. }
  124. static inline bool trigger_allbutself_cpu_backtrace(void)
  125. {
  126. return false;
  127. }
  128. static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
  129. {
  130. return false;
  131. }
  132. static inline bool trigger_single_cpu_backtrace(int cpu)
  133. {
  134. return false;
  135. }
  136. #endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
/* Convert 'watchdog_thresh' (seconds) into a perf NMI sample period. */
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
/* /proc/sys/kernel interface state — see the watchdog_enabled comment above. */
extern int nmi_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
extern int __read_mostly watchdog_suspended;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
extern bool is_hardlockup(void);
struct ctl_table;
/* sysctl handlers for the watchdog knobs under /proc/sys/kernel. */
extern int proc_watchdog(struct ctl_table *, int ,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int ,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int ,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int ,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
/* Detector compiled out: suspend trivially succeeds, resume is a no-op. */
static inline int lockup_detector_suspend(void)
{
	return 0;
}
static inline void lockup_detector_resume(void)
{
}
#endif
/* APEI NMI handling also needs the architecture's NMI definitions. */
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif /* LINUX_NMI_H */