/*
 * Common Performance counter support functions for PowerISA v2.07 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include "isa207-common.h"

/*
 * Raw event format fields exported to userspace via sysfs. Each entry
 * names the bit range of perf_event_attr.config that encodes the
 * corresponding field of the raw event code.
 */
PMU_FORMAT_ATTR(event, "config:0-49");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
PMU_FORMAT_ATTR(combine, "config:11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-23");
PMU_FORMAT_ATTR(sample_mode, "config:24-28");
PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
/* NULL-terminated list of the format attributes declared above. */
struct attribute *isa207_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};
/* The sysfs "format" attribute group wrapping isa207_pmu_format_attr. */
struct attribute_group isa207_pmu_format_group = {
	.name = "format",
	.attrs = isa207_pmu_format_attr,
};
  45. static inline bool event_is_fab_match(u64 event)
  46. {
  47. /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
  48. event &= 0xff0fe;
  49. /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
  50. return (event == 0x30056 || event == 0x4f052);
  51. }
  52. static bool is_event_valid(u64 event)
  53. {
  54. u64 valid_mask = EVENT_VALID_MASK;
  55. if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
  56. valid_mask = p9_EVENT_VALID_MASK;
  57. return !(event & ~valid_mask);
  58. }
  59. static u64 mmcra_sdar_mode(u64 event)
  60. {
  61. if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
  62. return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
  63. return MMCRA_SDAR_MODE_TLB;
  64. }
  65. static u64 thresh_cmp_val(u64 value)
  66. {
  67. if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
  68. return value << p9_MMCRA_THR_CMP_SHIFT;
  69. return value << MMCRA_THR_CMP_SHIFT;
  70. }
  71. static unsigned long combine_from_event(u64 event)
  72. {
  73. if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
  74. return p9_EVENT_COMBINE(event);
  75. return EVENT_COMBINE(event);
  76. }
  77. static unsigned long combine_shift(unsigned long pmc)
  78. {
  79. if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
  80. return p9_MMCR1_COMBINE_SHIFT(pmc);
  81. return MMCR1_COMBINE_SHIFT(pmc);
  82. }
  83. int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
  84. {
  85. unsigned int unit, pmc, cache, ebb;
  86. unsigned long mask, value;
  87. mask = value = 0;
  88. if (!is_event_valid(event))
  89. return -1;
  90. pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
  91. unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
  92. cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
  93. ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;
  94. if (pmc) {
  95. u64 base_event;
  96. if (pmc > 6)
  97. return -1;
  98. /* Ignore Linux defined bits when checking event below */
  99. base_event = event & ~EVENT_LINUX_MASK;
  100. if (pmc >= 5 && base_event != 0x500fa &&
  101. base_event != 0x600f4)
  102. return -1;
  103. mask |= CNST_PMC_MASK(pmc);
  104. value |= CNST_PMC_VAL(pmc);
  105. }
  106. if (pmc <= 4) {
  107. /*
  108. * Add to number of counters in use. Note this includes events with
  109. * a PMC of 0 - they still need a PMC, it's just assigned later.
  110. * Don't count events on PMC 5 & 6, there is only one valid event
  111. * on each of those counters, and they are handled above.
  112. */
  113. mask |= CNST_NC_MASK;
  114. value |= CNST_NC_VAL;
  115. }
  116. if (unit >= 6 && unit <= 9) {
  117. /*
  118. * L2/L3 events contain a cache selector field, which is
  119. * supposed to be programmed into MMCRC. However MMCRC is only
  120. * HV writable, and there is no API for guest kernels to modify
  121. * it. The solution is for the hypervisor to initialise the
  122. * field to zeroes, and for us to only ever allow events that
  123. * have a cache selector of zero. The bank selector (bit 3) is
  124. * irrelevant, as long as the rest of the value is 0.
  125. */
  126. if (cache & 0x7)
  127. return -1;
  128. } else if (event & EVENT_IS_L1) {
  129. mask |= CNST_L1_QUAL_MASK;
  130. value |= CNST_L1_QUAL_VAL(cache);
  131. }
  132. if (event & EVENT_IS_MARKED) {
  133. mask |= CNST_SAMPLE_MASK;
  134. value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
  135. }
  136. /*
  137. * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
  138. * the threshold control bits are used for the match value.
  139. */
  140. if (event_is_fab_match(event)) {
  141. mask |= CNST_FAB_MATCH_MASK;
  142. value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
  143. } else {
  144. /*
  145. * Check the mantissa upper two bits are not zero, unless the
  146. * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
  147. */
  148. unsigned int cmp, exp;
  149. cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
  150. exp = cmp >> 7;
  151. if (exp && (cmp & 0x60) == 0)
  152. return -1;
  153. mask |= CNST_THRESH_MASK;
  154. value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
  155. }
  156. if (!pmc && ebb)
  157. /* EBB events must specify the PMC */
  158. return -1;
  159. if (event & EVENT_WANTS_BHRB) {
  160. if (!ebb)
  161. /* Only EBB events can request BHRB */
  162. return -1;
  163. mask |= CNST_IFM_MASK;
  164. value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
  165. }
  166. /*
  167. * All events must agree on EBB, either all request it or none.
  168. * EBB events are pinned & exclusive, so this should never actually
  169. * hit, but we leave it as a fallback in case.
  170. */
  171. mask |= CNST_EBB_VAL(ebb);
  172. value |= CNST_EBB_MASK;
  173. *maskp = mask;
  174. *valp = value;
  175. return 0;
  176. }
/*
 * Assign PMCs and build the performance-monitor register images for the
 * @n_ev events in @event[].
 *
 * On return, hwc[i] holds the 0-based PMC index assigned to event i, and
 * mmcr[] holds the register values: [0] = MMCR0 enable/freeze bits,
 * [1] = MMCR1, [2] = MMCRA, [3] = MMCR2 (note MMCRA before MMCR2 —
 * assigned from the mmcra/mmcr2 locals at the bottom).
 */
int isa207_compute_mmcr(u64 event[], int n_ev,
			unsigned int hwc[], unsigned long mmcr[],
			struct perf_event *pevents[])
{
	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	mmcra = mmcr1 = mmcr2 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = combine_from_event(event[i]);
		psel    = event[i] & EVENT_PSEL_MASK;

		if (!pmc) {
			/* Event requested no specific PMC: grab the first free one of 1-4 */
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		/* PMC 5 & 6 are fixed-function; only 1-4 have MMCR1 selector fields */
		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << combine_shift(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		/* In continuous sampling mode, update SDAR on TLB miss */
		mmcra |= mmcra_sdar_mode(event[i]);

		if (event[i] & EVENT_IS_L1) {
			/* Low cache_sel bit -> I-cache qualifier, next bit -> D-cache */
			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
			cache >>= 1;
			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
		}

		if (event[i] & EVENT_IS_MARKED) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			/* Split the sample field into mode (low 2 bits) and eligibility */
			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			/* Normal case: program threshold control/select/compare into MMCRA */
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= thresh_cmp_val(val);
		}

		if (event[i] & EVENT_WANTS_BHRB) {
			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
			mmcra |= val << MMCRA_IFM_SHIFT;
		}

		/* Per-PMC privilege filtering via the MMCR2 freeze bits */
		if (pevents[i]->attr.exclude_user)
			mmcr2 |= MMCR2_FCP(pmc);

		if (pevents[i]->attr.exclude_hv)
			mmcr2 |= MMCR2_FCH(pmc);

		if (pevents[i]->attr.exclude_kernel) {
			/* When the kernel runs in HV mode, "kernel" is the HV state */
			if (cpu_has_feature(CPU_FTR_HVMODE))
				mmcr2 |= MMCR2_FCH(pmc);
			else
				mmcr2 |= MMCR2_FCS(pmc);
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr[0] = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	mmcr[3] = mmcr2;

	return 0;
}
  272. void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
  273. {
  274. if (pmc <= 3)
  275. mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
  276. }