/* perfctr-watchdog.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * local apic based NMI watchdog for various CPUs.
  4. *
  5. * This file also handles reservation of performance counters for coordination
  6. * with other users (like oprofile).
  7. *
  8. * Note that these events normally don't tick when the CPU idles. This means
  9. * the frequency varies with CPU load.
  10. *
  11. * Original code for K7/P6 written by Keith Owens
  12. *
  13. */
  14. #include <linux/percpu.h>
  15. #include <linux/export.h>
  16. #include <linux/kernel.h>
  17. #include <linux/bitops.h>
  18. #include <linux/smp.h>
  19. #include <asm/nmi.h>
  20. #include <linux/kprobes.h>
  21. #include <asm/apic.h>
  22. #include <asm/perf_event.h>
  23. /*
  24. * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  25. * offset from MSR_P4_BSU_ESCR0.
  26. *
  27. * It will be the max for all platforms (for now)
  28. */
  29. #define NMI_MAX_COUNTER_BITS 66
  30. /*
  31. * perfctr_nmi_owner tracks the ownership of the perfctr registers:
  32. * evtsel_nmi_owner tracks the ownership of the event selection
  33. * - different performance counters/ event selection may be reserved for
  34. * different subsystems this reservation system just tries to coordinate
  35. * things a little
  36. */
  37. static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
  38. static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
  39. /* converts an msr to an appropriate reservation bit */
  40. static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
  41. {
  42. /* returns the bit offset of the performance counter register */
  43. switch (boot_cpu_data.x86_vendor) {
  44. case X86_VENDOR_HYGON:
  45. case X86_VENDOR_AMD:
  46. if (msr >= MSR_F15H_PERF_CTR)
  47. return (msr - MSR_F15H_PERF_CTR) >> 1;
  48. return msr - MSR_K7_PERFCTR0;
  49. case X86_VENDOR_INTEL:
  50. if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  51. return msr - MSR_ARCH_PERFMON_PERFCTR0;
  52. switch (boot_cpu_data.x86) {
  53. case 6:
  54. return msr - MSR_P6_PERFCTR0;
  55. case 11:
  56. return msr - MSR_KNC_PERFCTR0;
  57. case 15:
  58. return msr - MSR_P4_BPU_PERFCTR0;
  59. }
  60. }
  61. return 0;
  62. }
  63. /*
  64. * converts an msr to an appropriate reservation bit
  65. * returns the bit offset of the event selection register
  66. */
  67. static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
  68. {
  69. /* returns the bit offset of the event selection register */
  70. switch (boot_cpu_data.x86_vendor) {
  71. case X86_VENDOR_HYGON:
  72. case X86_VENDOR_AMD:
  73. if (msr >= MSR_F15H_PERF_CTL)
  74. return (msr - MSR_F15H_PERF_CTL) >> 1;
  75. return msr - MSR_K7_EVNTSEL0;
  76. case X86_VENDOR_INTEL:
  77. if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  78. return msr - MSR_ARCH_PERFMON_EVENTSEL0;
  79. switch (boot_cpu_data.x86) {
  80. case 6:
  81. return msr - MSR_P6_EVNTSEL0;
  82. case 11:
  83. return msr - MSR_KNC_EVNTSEL0;
  84. case 15:
  85. return msr - MSR_P4_BSU_ESCR0;
  86. }
  87. }
  88. return 0;
  89. }
  90. /* checks for a bit availability (hack for oprofile) */
  91. int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
  92. {
  93. BUG_ON(counter > NMI_MAX_COUNTER_BITS);
  94. return !test_bit(counter, perfctr_nmi_owner);
  95. }
  96. EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
  97. int reserve_perfctr_nmi(unsigned int msr)
  98. {
  99. unsigned int counter;
  100. counter = nmi_perfctr_msr_to_bit(msr);
  101. /* register not managed by the allocator? */
  102. if (counter > NMI_MAX_COUNTER_BITS)
  103. return 1;
  104. if (!test_and_set_bit(counter, perfctr_nmi_owner))
  105. return 1;
  106. return 0;
  107. }
  108. EXPORT_SYMBOL(reserve_perfctr_nmi);
  109. void release_perfctr_nmi(unsigned int msr)
  110. {
  111. unsigned int counter;
  112. counter = nmi_perfctr_msr_to_bit(msr);
  113. /* register not managed by the allocator? */
  114. if (counter > NMI_MAX_COUNTER_BITS)
  115. return;
  116. clear_bit(counter, perfctr_nmi_owner);
  117. }
  118. EXPORT_SYMBOL(release_perfctr_nmi);
  119. int reserve_evntsel_nmi(unsigned int msr)
  120. {
  121. unsigned int counter;
  122. counter = nmi_evntsel_msr_to_bit(msr);
  123. /* register not managed by the allocator? */
  124. if (counter > NMI_MAX_COUNTER_BITS)
  125. return 1;
  126. if (!test_and_set_bit(counter, evntsel_nmi_owner))
  127. return 1;
  128. return 0;
  129. }
  130. EXPORT_SYMBOL(reserve_evntsel_nmi);
  131. void release_evntsel_nmi(unsigned int msr)
  132. {
  133. unsigned int counter;
  134. counter = nmi_evntsel_msr_to_bit(msr);
  135. /* register not managed by the allocator? */
  136. if (counter > NMI_MAX_COUNTER_BITS)
  137. return;
  138. clear_bit(counter, evntsel_nmi_owner);
  139. }
  140. EXPORT_SYMBOL(release_evntsel_nmi);