power9-pmu.c

/*
 * Performance counter support for POWER9 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or later version.
 */

#define pr_fmt(fmt)	"power9-pmu: " fmt

#include "isa207-common.h"
/*
 * Raw event encoding for Power9:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]                       [ ] [      thresh_cmp     ]   [  thresh_ctl   ]
 *   | |  |                         |                                 |
 *   | |  *- IFM (Linux)            |               thresh start/stop -*
 *   | *- BHRB (Linux)              *sm
 *   *- EBB (Linux)
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   []    m   [    pmcxsel    ]
 *     |        |           |                           |     |
 *     |        |           |                           |     *- mark
 *     |        |           *- L1/L2/L3 cache_sel       |
 *     |        |                                       |
 *     |        *- sampling mode for marked events      *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[24]  = pmc1combine[0]
 * MMCR1[25]  = pmc1combine[1]
 * MMCR1[26]  = pmc2combine[0]
 * MMCR1[27]  = pmc2combine[1]
 * MMCR1[28]  = pmc3combine[0]
 * MMCR1[29]  = pmc3combine[1]
 * MMCR1[30]  = pmc4combine[0]
 * MMCR1[31]  = pmc4combine[1]
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	MMCR1[20:27] = thresh_ctl
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	MMCR1[20:27] = thresh_ctl
 * else
 *	MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[9:11] = thresh_cmp[0:2]
 *	MMCRA[12:18] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *	MMCRA[32:33] = IFM
 *
 * MMCRA[SDAR_MODE] = sm
 */
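/*
 * A rough sketch of how the fields above compose into a raw perf config
 * value, with bit positions taken from the format attributes exported
 * further down (the shift-and-OR composition mirrors what perf does when
 * the fields are named individually; any concrete values are illustrative
 * only, not taken from this file):
 *
 *	config = pmcxsel		(config[0:7])
 *		| mark         << 8	(config[8])
 *		| combine      << 10	(config[10:11])
 *		| unit         << 12	(config[12:15])
 *		| pmc          << 16	(config[16:19])
 *		| cache_sel    << 20	(config[20:23])
 *		| sample_mode  << 24	(config[24:28])
 *		| thresh_sel   << 29	(config[29:31])
 *		| thresh_stop  << 32	(config[32:35])
 *		| thresh_start << 36	(config[36:39])
 *		| thresh_cmp   << 40	(config[40:49])
 *		| sdar_mode    << 50	(config[50:51])
 */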
/*
 * Some power9 event codes.
 */
#define EVENT(_name, _code)	_name = _code,

enum {
#include "power9-events-list.h"
};

#undef EVENT
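/*
 * The EVENT() macro above is an X-macro: including power9-events-list.h
 * while it is defined expands each listed event into an enumerator, so
 * every PM_* name used in this file carries its raw event code without
 * duplicating the list here.
 */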
/* MMCRA IFM bits - POWER9 */
#define POWER9_MMCRA_IFM1		0x0000000040000000UL
#define POWER9_MMCRA_IFM2		0x0000000080000000UL
#define POWER9_MMCRA_IFM3		0x00000000C0000000UL

/* PowerISA v2.07 format attribute structure */
extern struct attribute_group isa207_pmu_format_group;

/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
	{ PM_INST_DISP,		PM_INST_DISP_ALT },
};

static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt, power9_event_alternatives,
					  (int)ARRAY_SIZE(power9_event_alternatives));

	return num_alt;
}
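/*
 * Alternative encodings give the core event-scheduling code an equivalent
 * event to fall back on (here PM_INST_DISP_ALT for PM_INST_DISP) when the
 * original encoding cannot be placed on a PMC because of constraints.
 */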
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_CMPL);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1_FIN);

CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1_FIN);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BRU_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);

static struct attribute *power9_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BRU_CMPL),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN),
	CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_L2_ST_MISS),
	CACHE_EVENT_PTR(PM_L2_ST),
	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BRU_CMPL),
	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};

static struct attribute_group power9_pmu_events_group = {
	.name = "events",
	.attrs = power9_events_attr,
};

static const struct attribute_group *power9_isa207_pmu_attr_groups[] = {
	&isa207_pmu_format_group,
	&power9_pmu_events_group,
	NULL,
};
PMU_FORMAT_ATTR(event,		"config:0-51");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:10-11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
PMU_FORMAT_ATTR(sdar_mode,	"config:50-51");

static struct attribute *power9_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	&format_attr_sdar_mode.attr,
	NULL,
};

static struct attribute_group power9_pmu_format_group = {
	.name = "format",
	.attrs = power9_pmu_format_attr,
};
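/*
 * These format attributes are what tools read to learn the raw-event field
 * layout; they typically appear under
 * /sys/bus/event_source/devices/cpu/format/, so a hypothetical invocation
 * such as "perf stat -e cpu/pmc=1,pmcxsel=0xf0/" (values illustrative only)
 * would be decoded using the bit ranges defined above.
 */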
static const struct attribute_group *power9_pmu_attr_groups[] = {
	&power9_pmu_format_group,
	&power9_pmu_events_group,
	NULL,
};
static int power9_generic_events_dd1[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_ICT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_DISP,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_CMPL,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1_FIN,
};

static int power9_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_ICT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_CMPL,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1_FIN,
};
static u64 power9_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER9_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}
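/*
 * For example, a branch-sampling request like "perf record -b -j any_call"
 * (PERF_SAMPLE_BRANCH_ANY_CALL) maps to POWER9_MMCRA_IFM1 above, while
 * any_return and ind_call filters are rejected since the hardware cannot
 * filter on them.
 */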
static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1_FIN,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_CMPL,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};

#undef C
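/*
 * Two PMU descriptions follow: power9_isa207_pmu is the fallback used on
 * POWER9 DD1 parts (PPMU_NO_SIAR, dispatch-based generic instruction
 * count), while power9_pmu is used on later revisions. init_power9_pmu()
 * below selects between them based on CPU_FTR_POWER9_DD1.
 */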
static struct power_pmu power9_isa207_pmu = {
	.name			= "POWER9",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= P9_DD1_TEST_ADDER,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power9_config_bhrb,
	.bhrb_filter_map	= power9_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power9_get_alternatives,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_NO_SIAR | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power9_generic_events_dd1),
	.generic_events		= power9_generic_events_dd1,
	.cache_events		= &power9_cache_events,
	.attr_groups		= power9_isa207_pmu_attr_groups,
	.bhrb_nr		= 32,
};

static struct power_pmu power9_pmu = {
	.name			= "POWER9",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power9_config_bhrb,
	.bhrb_filter_map	= power9_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power9_get_alternatives,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power9_generic_events),
	.generic_events		= power9_generic_events,
	.cache_events		= &power9_cache_events,
	.attr_groups		= power9_pmu_attr_groups,
	.bhrb_nr		= 32,
};
static int __init init_power9_pmu(void)
{
	int rc = 0;

	/* Comes from cpu_specs[] */
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
		return -ENODEV;

	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		/*
		 * Since PM_INST_CMPL may not provide right counts in all
		 * sampling scenarios in power9 DD1, instead use PM_INST_DISP.
		 */
		EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP;
		rc = register_power_pmu(&power9_isa207_pmu);
	} else {
		rc = register_power_pmu(&power9_pmu);
	}

	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	return 0;
}
early_initcall(init_power9_pmu);