/* arch/x86/include/asm/perf_event.h */

#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC                  32
#define INTEL_PMC_MAX_FIXED                     3
#define INTEL_PMC_IDX_FIXED                    32

#define X86_PMC_IDX_MAX                        64

#define MSR_ARCH_PERFMON_PERFCTR0              0xc1
#define MSR_ARCH_PERFMON_PERFCTR1              0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0             0x186
#define MSR_ARCH_PERFMON_EVENTSEL1             0x187

#define ARCH_PERFMON_EVENTSEL_EVENT            0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK            0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR              (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS               (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE             (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL      (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT              (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY              (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE           (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV              (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK            0xFF000000ULL

#define HSW_IN_TX                              (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED                 (1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE         (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY               (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY                (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT      37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK       \
        (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT   \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK  \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK             \
        (ARCH_PERFMON_EVENTSEL_EVENT | \
         ARCH_PERFMON_EVENTSEL_UMASK | \
         ARCH_PERFMON_EVENTSEL_EDGE  | \
         ARCH_PERFMON_EVENTSEL_INV   | \
         ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS                    \
        (ARCH_PERFMON_EVENTSEL_EDGE |          \
         ARCH_PERFMON_EVENTSEL_INV |           \
         ARCH_PERFMON_EVENTSEL_CMASK |         \
         ARCH_PERFMON_EVENTSEL_ANY |           \
         ARCH_PERFMON_EVENTSEL_PIN_CONTROL |   \
         HSW_IN_TX |                           \
         HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK           \
        (X86_RAW_EVENT_MASK |          \
         AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB        \
        (AMD64_EVENTSEL_EVENT |        \
         ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_NUM_COUNTERS                      4
#define AMD64_NUM_COUNTERS_CORE                 6
#define AMD64_NUM_COUNTERS_NB                   4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL          0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK        (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX        0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT      \
        (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED     6
#define ARCH_PERFMON_EVENTS_COUNT              7
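
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): composing an event-select value for the architectural
 * "unhalted core cycles" event, counted in both user and kernel mode
 * with counting enabled. A value like this would be written to
 * MSR_ARCH_PERFMON_EVENTSEL0; the helper name is hypothetical.
 */
static inline u64 example_core_cycles_eventsel(void)
{
        return ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL   |
               ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK |
               ARCH_PERFMON_EVENTSEL_USR               |
               ARCH_PERFMON_EVENTSEL_OS                |
               ARCH_PERFMON_EVENTSEL_ENABLE;
}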

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

union cpuid10_ebx {
        struct {
                unsigned int no_unhalted_core_cycles:1;
                unsigned int no_instructions_retired:1;
                unsigned int no_unhalted_reference_cycles:1;
                unsigned int no_llc_reference:1;
                unsigned int no_llc_misses:1;
                unsigned int no_branch_instruction_retired:1;
                unsigned int no_branch_misses_retired:1;
        } split;
        unsigned int full;
};

union cpuid10_edx {
        struct {
                unsigned int num_counters_fixed:5;
                unsigned int bit_width_fixed:8;
                unsigned int reserved:19;
        } split;
        unsigned int full;
};

struct x86_pmu_capability {
        int             version;
        int             num_counters_gp;
        int             num_counters_fixed;
        int             bit_width_gp;
        int             bit_width_fixed;
        unsigned int    events_mask;
        int             events_mask_len;
};
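
/*
 * Illustrative sketch (editor's addition): how CPUID leaf 0xa populates
 * the unions above and, from there, struct x86_pmu_capability. Assumes
 * the cpuid() helper from <asm/processor.h>; the function name is
 * hypothetical. Note the EBX flags are "event NOT available" bits.
 */
static inline void example_fill_pmu_capability(struct x86_pmu_capability *cap)
{
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
        union cpuid10_edx edx;
        unsigned int ecx_unused;

        cpuid(0xa, &eax.full, &ebx.full, &ecx_unused, &edx.full);

        cap->version            = eax.split.version_id;
        cap->num_counters_gp    = eax.split.num_counters;
        cap->bit_width_gp       = eax.split.bit_width;
        cap->events_mask_len    = eax.split.mask_length;
        cap->events_mask        = ebx.full;     /* set bit = event unavailable */
        cap->num_counters_fixed = edx.split.num_counters_fixed;
        cap->bit_width_fixed    = edx.split.bit_width_fixed;
}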

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL        0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0            0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS       (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1            0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES         (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2            0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES         (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES         (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
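
/*
 * Illustrative sketch (editor's addition): each fixed counter owns a
 * 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL. Per the Intel SDM,
 * bits 0-1 select the ring level (1 = OS, 2 = USR, 3 = both) and bit 3
 * requests a PMI on overflow. The helper name is hypothetical.
 */
static inline u64 example_fixed_ctrl_bits(int idx, bool usr, bool os, bool pmi)
{
        u64 bits = 0;

        if (os)
                bits |= 0x1;
        if (usr)
                bits |= 0x2;
        if (pmi)
                bits |= 0x8;

        return bits << (idx * 4);       /* idx relative to INTEL_PMC_IDX_FIXED */
}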

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS        (INTEL_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */
#define IBS_CPUID_FEATURES     0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL                 (1U<<0)
#define IBS_CAPS_FETCHSAM              (1U<<1)
#define IBS_CAPS_OPSAM                 (1U<<2)
#define IBS_CAPS_RDWROPCNT             (1U<<3)
#define IBS_CAPS_OPCNT                 (1U<<4)
#define IBS_CAPS_BRNTRGT               (1U<<5)
#define IBS_CAPS_OPCNTEXT              (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK         (1U<<7)
#define IBS_CAPS_OPBRNFUSE             (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD          (1U<<9)
#define IBS_CAPS_OPDATA4               (1U<<10)

#define IBS_CAPS_DEFAULT               (IBS_CAPS_AVAIL         \
                                        | IBS_CAPS_FETCHSAM    \
                                        | IBS_CAPS_OPSAM)
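
/*
 * Illustrative sketch (editor's addition): probing the IBS capability
 * word, in the spirit of the kernel's get_ibs_caps(). As the comment
 * above notes, bit 0 (IBS_CAPS_AVAIL) of Fn8000_001B_EAX reports whether
 * the flag word itself is valid; when it is not, an IBS-capable CPU
 * falls back to the baseline IBS_CAPS_DEFAULT set. Assumes cpuid_eax()
 * and boot_cpu_has() from <asm/processor.h>/<asm/cpufeature.h>; the
 * helper name is hypothetical.
 */
static inline u32 example_probe_ibs_caps(void)
{
        u32 caps;

        if (!boot_cpu_has(X86_FEATURE_IBS))
                return 0;

        caps = cpuid_eax(IBS_CPUID_FEATURES);
        if (!(caps & IBS_CAPS_AVAIL))
                return IBS_CAPS_DEFAULT;        /* flag word not valid */

        return caps;
}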

/*
 * IBS APIC setup
 */
#define IBSCTL                         0x1cc
#define IBSCTL_LVT_OFFSET_VALID        (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK         0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN      (1ULL<<57)
#define IBS_FETCH_VAL          (1ULL<<49)
#define IBS_FETCH_ENABLE       (1ULL<<48)
#define IBS_FETCH_CNT          0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT      0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT         (0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL         (1ULL<<19)
#define IBS_OP_VAL             (1ULL<<18)
#define IBS_OP_ENABLE          (1ULL<<17)
#define IBS_OP_MAX_CNT         0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT     0x007FFFFFULL   /* not a register bit mask */
#define IBS_RIP_INVALID        (1ULL<<38)
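
/*
 * Illustrative sketch (editor's addition): composing an IbsOpCtl value
 * that enables op sampling roughly every @period ops. The hardware keeps
 * the max count in 16-op granularity, so the low 4 bits of the period
 * are dropped (matching the IBS_OP_CUR_CNT comment above). IBS_OP_CNT_CTL
 * (count dispatched ops rather than cycles) is only set when the CPU
 * advertises IBS_CAPS_OPCNT. The helper name is hypothetical.
 */
static inline u64 example_ibs_op_ctl(u64 period, u32 ibs_caps)
{
        u64 ctl = IBS_OP_ENABLE | ((period >> 4) & IBS_OP_MAX_CNT);

        if (ibs_caps & IBS_CAPS_OPCNT)
                ctl |= IBS_OP_CNT_CTL;

        return ctl;
}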

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT      (1UL << 3)
#define PERF_EFLAGS_VM         (1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)  perf_misc_flags(regs)
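
/*
 * Illustrative sketch (editor's addition): the gist of what
 * perf_misc_flags() does with PERF_EFLAGS_EXACT in
 * arch/x86/events code. PERF_RECORD_MISC_* come from
 * <linux/perf_event.h>, user_mode() from <asm/ptrace.h>; the helper
 * name is hypothetical and assumes the full pt_regs definition.
 */
static inline unsigned long example_misc_flags(struct pt_regs *regs)
{
        unsigned long misc = user_mode(regs) ? PERF_RECORD_MISC_USER :
                                               PERF_RECORD_MISC_KERNEL;

        if (regs->flags & PERF_EFLAGS_EXACT)
                misc |= PERF_RECORD_MISC_EXACT_IP;

        return misc;
}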

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)        {       \
        (regs)->ip = (__ip);                                   \
        (regs)->bp = caller_frame_pointer();                   \
        (regs)->cs = __KERNEL_CS;                              \
        (regs)->flags = 0;                                     \
        asm volatile(                                          \
                _ASM_MOV "%%"_ASM_SP ", %0\n"                  \
                : "=m" ((regs)->sp)                            \
                :: "memory"                                    \
        );                                                     \
}

struct perf_guest_switch_msr {
        unsigned msr;
        u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
        *nr = 0;
        return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
        memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif
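
/*
 * Illustrative sketch (editor's addition): how a hypervisor might consume
 * perf_guest_get_msrs() around VM entry, switching each reported PMU MSR
 * from its host to its guest value. Assumes wrmsrl() from <asm/msr.h>;
 * the helper name is hypothetical (KVM uses its atomic MSR switch lists
 * for the real work).
 */
static inline void example_switch_pmu_msrs_to_guest(void)
{
        struct perf_guest_switch_msr *msrs;
        int i, nr;

        msrs = perf_guest_get_msrs(&nr);
        for (i = 0; i < nr; i++)
                wrmsrl(msrs[i].msr, msrs[i].guest);
}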

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */