/* hw_nmi.c */
  1. /*
  2. * HW NMI watchdog support
  3. *
  4. * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
  5. *
  6. * Arch specific calls to support NMI watchdog
  7. *
  8. * Bits copied from original nmi.c file
  9. *
  10. */
  11. #include <asm/apic.h>
  12. #include <asm/nmi.h>
  13. #include <linux/cpumask.h>
  14. #include <linux/kdebug.h>
  15. #include <linux/notifier.h>
  16. #include <linux/kprobes.h>
  17. #include <linux/nmi.h>
  18. #include <linux/module.h>
  19. #include <linux/delay.h>
  20. #ifdef CONFIG_HARDLOCKUP_DETECTOR
  21. u64 hw_nmi_get_sample_period(int watchdog_thresh)
  22. {
  23. return (u64)(cpu_khz) * 1000 * watchdog_thresh;
  24. }
  25. #endif
  26. #ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
/* CPUs that still owe a backtrace; each target clears its own bit. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;
/*
 * arch_trigger_all_cpu_backtrace - make CPUs dump their stacks via NMI
 * @include_self: if true, the calling CPU is included in the dump set
 *
 * Sends an NMI IPI to every online CPU (optionally excluding the caller)
 * and busy-waits up to 10 seconds for each targeted CPU to clear its bit
 * in backtrace_mask from the NMI handler.  Concurrent invocations are
 * serialised through backtrace_flag; a second caller returns immediately
 * rather than producing interleaved dumps.
 */
void arch_trigger_all_cpu_backtrace(bool include_self)
{
	int i;
	int cpu = get_cpu();	/* disables preemption; we stay on this CPU */

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	/* Target every online CPU, minus ourselves unless requested. */
	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
	if (!include_self)
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("sending NMI to %s CPUs:\n",
			(include_self ? "all" : "other"));
		apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		/* Keep the soft-lockup detector quiet while we spin here. */
		touch_softlockup_watchdog();
	}

	/* Release the in-progress flag before other callers can observe it. */
	clear_bit(0, &backtrace_flag);
	smp_mb__after_atomic();
	put_cpu();
}
/*
 * NMI handler: if this CPU was targeted by
 * arch_trigger_all_cpu_backtrace(), print its registers/backtrace and
 * acknowledge by clearing its bit in backtrace_mask.
 *
 * Returns NMI_HANDLED when the NMI was ours, NMI_DONE otherwise so the
 * remaining NMI handlers in the chain get a chance to claim it.
 */
static int
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		/*
		 * Raw arch spinlock (safe to take in NMI context) so the
		 * per-CPU dumps don't interleave on the console.
		 */
		static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

		arch_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		arch_spin_unlock(&lock);
		/* Tell the waiting initiator this CPU is done. */
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return NMI_HANDLED;
	}

	return NMI_DONE;
}
NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler);
  79. static int __init register_trigger_all_cpu_backtrace(void)
  80. {
  81. register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
  82. 0, "arch_bt");
  83. return 0;
  84. }
  85. early_initcall(register_trigger_all_cpu_backtrace);
  86. #endif