perf_event.c

/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types (some are defined in asm/perf_event.h).
 */
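
/*
 * Editor's note on the event number spaces used below: per the ARMv8 ARM,
 * 0x0000-0x003F are architecturally defined common events, 0x0040-0x00BF is
 * the recommended IMPLEMENTATION DEFINED space, and the higher values used
 * here (0xC0 and up) are vendor-specific extensions.
 */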
/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B

/* Common architectural events. */
#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06
#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A
#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B
#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C
#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D
#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E
#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C
#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21

/* Common microarchitectural events. */
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01
#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02
#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30

/* ARMv8 recommended implementation defined event types */
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x4C
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A
#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C
#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D
#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E
#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F
#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70
#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71
#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72
#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73
#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74
#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75
#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76
#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77
#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78
#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79
#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A
#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C
#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D
#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E
#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81
#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82
#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83
#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84
#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86
#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87
#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88
#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F
#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90
#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED

/* PMUv3 HW events mapping. */

/*
 * ARMv8 architecturally defined events; not all of these may
 * be supported on any given implementation. Unsupported events will
 * be disabled at run-time.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
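
/*
 * PERF_MAP_ALL_UNSUPPORTED (from <linux/perf/arm_pmu.h>) pre-fills every
 * slot of the table above with HW_OP_UNSUPPORTED, so only the entries
 * listed explicitly resolve to a real event number; armpmu_map_event()
 * rejects everything else. The cache maps below use the analogous
 * PERF_CACHE_MAP_ALL_UNSUPPORTED initializer.
 */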
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
        [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

        [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
        [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

        [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
        [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                  [PERF_COUNT_HW_CACHE_OP_MAX]
                                                  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

        [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
        [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                 [PERF_COUNT_HW_CACHE_OP_MAX]
                                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

        [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
        [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
                           struct device_attribute *attr, char *page)
{
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

        return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}
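
/*
 * Editor's note: each attribute declared below appears under
 * /sys/bus/event_source/devices/<pmu>/events/<name> and prints its event
 * number via the show routine above, e.g. "event=0x011" for cpu_cycles
 * (0x11). A sketch of typical usage, assuming the generic "armv8_pmuv3"
 * PMU instance name:
 *
 *   perf stat -e armv8_pmuv3/cpu_cycles/ -- <cmd>
 */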

#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
#define ARMV8_EVENT_ATTR(name, config) \
        PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
                       config, armv8pmu_events_sysfs_show)

ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
/* Don't expose the chain event in /sys, since it's useless in isolation */
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);

static struct attribute *armv8_pmuv3_event_attrs[] = {
        &armv8_event_attr_sw_incr.attr.attr,
        &armv8_event_attr_l1i_cache_refill.attr.attr,
        &armv8_event_attr_l1i_tlb_refill.attr.attr,
        &armv8_event_attr_l1d_cache_refill.attr.attr,
        &armv8_event_attr_l1d_cache.attr.attr,
        &armv8_event_attr_l1d_tlb_refill.attr.attr,
        &armv8_event_attr_ld_retired.attr.attr,
        &armv8_event_attr_st_retired.attr.attr,
        &armv8_event_attr_inst_retired.attr.attr,
        &armv8_event_attr_exc_taken.attr.attr,
        &armv8_event_attr_exc_return.attr.attr,
        &armv8_event_attr_cid_write_retired.attr.attr,
        &armv8_event_attr_pc_write_retired.attr.attr,
        &armv8_event_attr_br_immed_retired.attr.attr,
        &armv8_event_attr_br_return_retired.attr.attr,
        &armv8_event_attr_unaligned_ldst_retired.attr.attr,
        &armv8_event_attr_br_mis_pred.attr.attr,
        &armv8_event_attr_cpu_cycles.attr.attr,
        &armv8_event_attr_br_pred.attr.attr,
        &armv8_event_attr_mem_access.attr.attr,
        &armv8_event_attr_l1i_cache.attr.attr,
        &armv8_event_attr_l1d_cache_wb.attr.attr,
        &armv8_event_attr_l2d_cache.attr.attr,
        &armv8_event_attr_l2d_cache_refill.attr.attr,
        &armv8_event_attr_l2d_cache_wb.attr.attr,
        &armv8_event_attr_bus_access.attr.attr,
        &armv8_event_attr_memory_error.attr.attr,
        &armv8_event_attr_inst_spec.attr.attr,
        &armv8_event_attr_ttbr_write_retired.attr.attr,
        &armv8_event_attr_bus_cycles.attr.attr,
        &armv8_event_attr_l1d_cache_allocate.attr.attr,
        &armv8_event_attr_l2d_cache_allocate.attr.attr,
        &armv8_event_attr_br_retired.attr.attr,
        &armv8_event_attr_br_mis_pred_retired.attr.attr,
        &armv8_event_attr_stall_frontend.attr.attr,
        &armv8_event_attr_stall_backend.attr.attr,
        &armv8_event_attr_l1d_tlb.attr.attr,
        &armv8_event_attr_l1i_tlb.attr.attr,
        &armv8_event_attr_l2i_cache.attr.attr,
        &armv8_event_attr_l2i_cache_refill.attr.attr,
        &armv8_event_attr_l3d_cache_allocate.attr.attr,
        &armv8_event_attr_l3d_cache_refill.attr.attr,
        &armv8_event_attr_l3d_cache.attr.attr,
        &armv8_event_attr_l3d_cache_wb.attr.attr,
        &armv8_event_attr_l2d_tlb_refill.attr.attr,
        &armv8_event_attr_l2i_tlb_refill.attr.attr,
        &armv8_event_attr_l2d_tlb.attr.attr,
        &armv8_event_attr_l2i_tlb.attr.attr,
        NULL,
};

static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct pmu *pmu = dev_get_drvdata(dev);
        struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

        if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
                return attr->mode;

        return 0;
}

static struct attribute_group armv8_pmuv3_events_attr_group = {
        .name = "events",
        .attrs = armv8_pmuv3_event_attrs,
        .is_visible = armv8pmu_event_attr_is_visible,
};

PMU_FORMAT_ATTR(event, "config:0-15");

static struct attribute *armv8_pmuv3_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
        .name = "format",
        .attrs = armv8_pmuv3_format_attrs,
};
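
/*
 * Editor's note: "config:0-15" tells the perf core that the raw event
 * number occupies bits [15:0] of perf_event_attr.config, so the named
 * sysfs events above and raw usage such as
 * "perf stat -e armv8_pmuv3/event=0x11/" land in the same field.
 */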

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
        (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
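
/*
 * Worked example: on a PMU reporting PMCR_EL0.N == 6 event counters,
 * num_events is 7 (the probe code adds one for the cycle counter). Perf
 * idx 0 is the dedicated cycle counter, and perf idx 1..6 map to hardware
 * event counters 0..5 via ARMV8_IDX_TO_COUNTER().
 */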

static inline u32 armv8pmu_pmcr_read(void)
{
        return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
        val &= ARMV8_PMU_PMCR_MASK;
        isb();
        write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
        return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
        return idx >= ARMV8_IDX_CYCLE_COUNTER &&
                idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
        return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline int armv8pmu_select_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        write_sysreg(counter, pmselr_el0);
        isb();

        return idx;
}
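
/*
 * Editor's note: the event counters are accessed indirectly. Writing the
 * counter number to PMSELR_EL0 makes PMXEVCNTR_EL0 and PMXEVTYPER_EL0
 * alias that counter's value and type registers; the isb() above ensures
 * the new selection is visible before any following indirect access.
 */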

static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 value = 0;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u reading wrong counter %d\n",
                       smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                value = read_sysreg(pmccntr_el0);
        else if (armv8pmu_select_counter(idx) == idx)
                value = read_sysreg(pmxevcntr_el0);

        return value;
}

static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u writing wrong counter %d\n",
                       smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
                /*
                 * Set the upper 32 bits as this is a 64-bit counter but we
                 * only count using the lower 32 bits and we want an
                 * interrupt when it overflows.
                 */
                u64 value64 = 0xffffffff00000000ULL | value;

                write_sysreg(value64, pmccntr_el0);
        } else if (armv8pmu_select_counter(idx) == idx)
                write_sysreg(value, pmxevcntr_el0);
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
        if (armv8pmu_select_counter(idx) == idx) {
                val &= ARMV8_PMU_EVTYPE_MASK;
                write_sysreg(val, pmxevtyper_el0);
        }
}

static inline int armv8pmu_enable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        write_sysreg(BIT(counter), pmcntenset_el0);

        return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        write_sysreg(BIT(counter), pmcntenclr_el0);

        return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        write_sysreg(BIT(counter), pmintenset_el1);

        return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        write_sysreg(BIT(counter), pmintenclr_el1);
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        write_sysreg(BIT(counter), pmovsclr_el0);
        isb();

        return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
        u32 value;

        /* Read */
        value = read_sysreg(pmovsclr_el0);

        /* Write to clear flags */
        value &= ARMV8_PMU_OVSR_MASK;
        write_sysreg(value, pmovsclr_el0);

        return value;
}
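
/*
 * Editor's note: PMOVSCLR_EL0 is write-one-to-clear, so writing back the
 * value just read acknowledges exactly the overflow bits we are about to
 * handle, without racing against overflows that arrive afterwards.
 */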

static void armv8pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters).
         */
        armv8pmu_write_evtype(idx, hwc->config_base);

        /*
         * Enable interrupt for this counter
         */
        armv8pmu_enable_intens(idx);

        /*
         * Enable counter
         */
        armv8pmu_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Disable counter and interrupt
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Disable interrupt for this counter
         */
        armv8pmu_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
        u32 pmovsr;
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmovsr = armv8pmu_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv8pmu_has_overflowed(pmovsr))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct perf_event *event)
{
        int idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

        /* Always prefer to place a cycle counter into the cycle counter. */
        if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
                if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return ARMV8_IDX_CYCLE_COUNTER;
        }

        /*
         * Otherwise use the event counters
         */
        for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle)
                return -EPERM;

        /*
         * If we're running in hyp mode, then we *are* the hypervisor.
         * Therefore we ignore exclude_hv in this configuration, since
         * there's no hypervisor to sample anyway. This is consistent
         * with other architectures (x86 and Power).
         */
        if (is_kernel_in_hyp_mode()) {
                if (!attr->exclude_kernel)
                        config_base |= ARMV8_PMU_INCLUDE_EL2;
        } else {
                if (attr->exclude_kernel)
                        config_base |= ARMV8_PMU_EXCLUDE_EL1;
                if (!attr->exclude_hv)
                        config_base |= ARMV8_PMU_INCLUDE_EL2;
        }
        if (attr->exclude_user)
                config_base |= ARMV8_PMU_EXCLUDE_EL0;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}
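
/*
 * Editor's worked example (non-VHE kernel, assuming the perf tool's usual
 * modifier handling): "perf stat -e cycles:u" sets exclude_kernel and
 * exclude_hv, so config_base becomes ARMV8_PMU_EXCLUDE_EL1; a plain
 * "cycles" leaves all exclude bits clear and yields ARMV8_PMU_INCLUDE_EL2,
 * so EL2 time is counted where EL2 exists.
 */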

static void armv8pmu_reset(void *info)
{
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
        u32 idx, nb_cnt = cpu_pmu->num_events;

        /* The counter and interrupt enable registers are unknown at reset. */
        for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv8pmu_disable_counter(idx);
                armv8pmu_disable_intens(idx);
        }

        /*
         * Initialize & Reset PMNC. Request overflow interrupt for
         * 64 bit cycle counter but cheat in armv8pmu_write_counter().
         */
        armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
                            ARMV8_PMU_PMCR_LC);
}

static int __armv8_pmuv3_map_event(struct perf_event *event,
                                   const unsigned (*extra_event_map)
                                                  [PERF_COUNT_HW_MAX],
                                   const unsigned (*extra_cache_map)
                                                  [PERF_COUNT_HW_CACHE_MAX]
                                                  [PERF_COUNT_HW_CACHE_OP_MAX]
                                                  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
        int hw_event_id;
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

        hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
                                       &armv8_pmuv3_perf_cache_map,
                                       ARMV8_PMU_EVTYPE_EVENT);

        /* Only expose micro/arch events supported by this PMU */
        if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
            && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
                return hw_event_id;
        }

        return armpmu_map_event(event, extra_event_map, extra_cache_map,
                                ARMV8_PMU_EVTYPE_EVENT);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL,
                                       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
        return __armv8_pmuv3_map_event(event, NULL,
                                       &armv8_vulcan_perf_cache_map);
}

struct armv8pmu_probe_info {
        struct arm_pmu *pmu;
        bool present;
};

static void __armv8pmu_probe_pmu(void *info)
{
        struct armv8pmu_probe_info *probe = info;
        struct arm_pmu *cpu_pmu = probe->pmu;
        u64 dfr0;
        u32 pmceid[2];
        int pmuver;

        dfr0 = read_sysreg(id_aa64dfr0_el1);
        pmuver = cpuid_feature_extract_unsigned_field(dfr0,
                        ID_AA64DFR0_PMUVER_SHIFT);
        if (pmuver == 0xf || pmuver == 0)
                return;

        probe->present = true;

        /* Read the number of CNTx counters supported from PMNC */
        cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
                & ARMV8_PMU_PMCR_N_MASK;

        /* Add the CPU cycles counter */
        cpu_pmu->num_events += 1;

        pmceid[0] = read_sysreg(pmceid0_el0);
        pmceid[1] = read_sysreg(pmceid1_el0);

        bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
                          pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}
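
/*
 * Editor's note: PMCR_EL0.N reports the number of event counters and
 * PMCEID0/1_EL0 advertise which common events are implemented. The cached
 * pmceid_bitmap later gates both sysfs visibility
 * (armv8pmu_event_attr_is_visible) and event mapping
 * (__armv8_pmuv3_map_event).
 */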

static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
        struct armv8pmu_probe_info probe = {
                .pmu = cpu_pmu,
                .present = false,
        };
        int ret;

        ret = smp_call_function_any(&cpu_pmu->supported_cpus,
                                    __armv8pmu_probe_pmu,
                                    &probe, 1);
        if (ret)
                return ret;

        return probe.present ? 0 : -ENODEV;
}

static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8pmu_probe_pmu(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->handle_irq = armv8pmu_handle_irq;
        cpu_pmu->enable = armv8pmu_enable_event;
        cpu_pmu->disable = armv8pmu_disable_event;
        cpu_pmu->read_counter = armv8pmu_read_counter;
        cpu_pmu->write_counter = armv8pmu_write_counter;
        cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
        cpu_pmu->start = armv8pmu_start;
        cpu_pmu->stop = armv8pmu_stop;
        cpu_pmu->reset = armv8pmu_reset;
        cpu_pmu->max_period = (1LLU << 32) - 1;
        cpu_pmu->set_event_filter = armv8pmu_set_event_filter;

        return 0;
}

static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name = "armv8_pmuv3";
        cpu_pmu->map_event = armv8_pmuv3_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name = "armv8_cortex_a35";
        cpu_pmu->map_event = armv8_a53_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name = "armv8_cortex_a53";
        cpu_pmu->map_event = armv8_a53_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name = "armv8_cortex_a57";
        cpu_pmu->map_event = armv8_a57_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name = "armv8_cortex_a72";
        cpu_pmu->map_event = armv8_a57_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name = "armv8_cortex_a73";
        cpu_pmu->map_event = armv8_a73_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name = "armv8_cavium_thunder";
        cpu_pmu->map_event = armv8_thunder_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
        int ret = armv8_pmu_init(cpu_pmu);
        if (ret)
                return ret;

        cpu_pmu->name = "armv8_brcm_vulcan";
        cpu_pmu->map_event = armv8_vulcan_map_event;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
                &armv8_pmuv3_events_attr_group;
        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
                &armv8_pmuv3_format_attr_group;

        return 0;
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
        {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
        {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
        {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
        {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
        {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
        {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
        {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
        {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
        {},
};

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
        .driver = {
                .name = ARMV8_PMU_PDEV_NAME,
                .of_match_table = armv8_pmu_of_device_ids,
        },
        .probe = armv8_pmu_device_probe,
};

static int __init armv8_pmu_driver_init(void)
{
        if (acpi_disabled)
                return platform_driver_register(&armv8_pmu_driver);
        else
                return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)