perf_event.c

/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/irq_regs.h>
#include <asm/virt.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Common event types.
 */

/* Required events. */
#define ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR 0x00
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL 0x03
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS 0x04
#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED 0x10
#define ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES 0x11
#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED 0x12

/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED 0x08
#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x1B

/* Common architectural events. */
#define ARMV8_PMUV3_PERFCTR_MEM_READ 0x06
#define ARMV8_PMUV3_PERFCTR_MEM_WRITE 0x07
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
#define ARMV8_PMUV3_PERFCTR_EXC_EXECUTED 0x0A
#define ARMV8_PMUV3_PERFCTR_CID_WRITE 0x0B
#define ARMV8_PMUV3_PERFCTR_PC_WRITE 0x0C
#define ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH 0x0D
#define ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN 0x0E
#define ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS 0x0F
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE 0x1C
#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21

/* Common microarchitectural events. */
#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL 0x01
#define ARMV8_PMUV3_PERFCTR_ITLB_REFILL 0x02
#define ARMV8_PMUV3_PERFCTR_DTLB_REFILL 0x05
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS 0x14
#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB 0x15
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS 0x16
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL 0x17
#define ARMV8_PMUV3_PERFCTR_L2_CACHE_WB 0x18
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
#define ARMV8_PMUV3_PERFCTR_MEM_ERROR 0x1A
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30

/* ARMv8 implementation defined event types. */
#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_LD 0x40
#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_ST 0x41
#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_LD 0x42
#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_ST 0x43
#define ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_LD 0x4C
#define ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_ST 0x4D
#define ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_LD 0x4E
#define ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_ST 0x4F

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREFETCH_LINEFILL 0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1_DCACHE_MISS_ST 0xE9
#define ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_ACCESS 0xEA
#define ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_MISS 0xEB
#define ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_MISS 0xED

/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

/* ARM Cortex-A53 HW events mapping. */
static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};

/* ARM Cortex-A57 and Cortex-A72 events mapping. */
static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
};

static const unsigned armv8_thunder_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                              [PERF_COUNT_HW_CACHE_OP_MAX]
                                              [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_LD,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_LD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_ST,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_ST,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_LD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_ST,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                  [PERF_COUNT_HW_CACHE_OP_MAX]
                                                  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_LD,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_LD,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_ST,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1_DCACHE_MISS_ST,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_ACCESS,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_MISS,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
        [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_ACCESS,
        [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_MISS,

        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_LD,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_LD,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_ST,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_ST,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
#define ARMV8_EVENT_ATTR(name, config) \
        PMU_EVENT_ATTR_STRING(name, armv8_event_attr_##name, \
                              "event=" ARMV8_EVENT_ATTR_RESOLVE(config))

ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR);
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL);
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_ITLB_REFILL);
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL);
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS);
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_DTLB_REFILL);
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_MEM_READ);
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_MEM_WRITE);
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED);
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_EXECUTED);
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE);
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE);
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH);
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN);
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS);
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED);
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES);
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED);
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS);
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB);
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS);
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL);
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2_CACHE_WB);
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEM_ERROR);
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC);
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE);
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
ARMV8_EVENT_ATTR(chain, ARMV8_PMUV3_PERFCTR_CHAIN);
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);

static struct attribute *armv8_pmuv3_event_attrs[] = {
        &armv8_event_attr_sw_incr.attr.attr,
        &armv8_event_attr_l1i_cache_refill.attr.attr,
        &armv8_event_attr_l1i_tlb_refill.attr.attr,
        &armv8_event_attr_l1d_cache_refill.attr.attr,
        &armv8_event_attr_l1d_cache.attr.attr,
        &armv8_event_attr_l1d_tlb_refill.attr.attr,
        &armv8_event_attr_ld_retired.attr.attr,
        &armv8_event_attr_st_retired.attr.attr,
        &armv8_event_attr_inst_retired.attr.attr,
        &armv8_event_attr_exc_taken.attr.attr,
        &armv8_event_attr_exc_return.attr.attr,
        &armv8_event_attr_cid_write_retired.attr.attr,
        &armv8_event_attr_pc_write_retired.attr.attr,
        &armv8_event_attr_br_immed_retired.attr.attr,
        &armv8_event_attr_br_return_retired.attr.attr,
        &armv8_event_attr_unaligned_ldst_retired.attr.attr,
        &armv8_event_attr_br_mis_pred.attr.attr,
        &armv8_event_attr_cpu_cycles.attr.attr,
        &armv8_event_attr_br_pred.attr.attr,
        &armv8_event_attr_mem_access.attr.attr,
        &armv8_event_attr_l1i_cache.attr.attr,
        &armv8_event_attr_l1d_cache_wb.attr.attr,
        &armv8_event_attr_l2d_cache.attr.attr,
        &armv8_event_attr_l2d_cache_refill.attr.attr,
        &armv8_event_attr_l2d_cache_wb.attr.attr,
        &armv8_event_attr_bus_access.attr.attr,
        &armv8_event_attr_memory_error.attr.attr,
        &armv8_event_attr_inst_spec.attr.attr,
        &armv8_event_attr_ttbr_write_retired.attr.attr,
        &armv8_event_attr_bus_cycles.attr.attr,
        &armv8_event_attr_chain.attr.attr,
        &armv8_event_attr_l1d_cache_allocate.attr.attr,
        &armv8_event_attr_l2d_cache_allocate.attr.attr,
        &armv8_event_attr_br_retired.attr.attr,
        &armv8_event_attr_br_mis_pred_retired.attr.attr,
        &armv8_event_attr_stall_frontend.attr.attr,
        &armv8_event_attr_stall_backend.attr.attr,
        &armv8_event_attr_l1d_tlb.attr.attr,
        &armv8_event_attr_l1i_tlb.attr.attr,
        &armv8_event_attr_l2i_cache.attr.attr,
        &armv8_event_attr_l2i_cache_refill.attr.attr,
        &armv8_event_attr_l3d_cache_allocate.attr.attr,
        &armv8_event_attr_l3d_cache_refill.attr.attr,
        &armv8_event_attr_l3d_cache.attr.attr,
        &armv8_event_attr_l3d_cache_wb.attr.attr,
        &armv8_event_attr_l2d_tlb_refill.attr.attr,
        &armv8_event_attr_l2i_tlb_refill.attr.attr,
        &armv8_event_attr_l2d_tlb.attr.attr,
        &armv8_event_attr_l2i_tlb.attr.attr,
        NULL,
};

static struct attribute_group armv8_pmuv3_events_attr_group = {
        .name = "events",
        .attrs = armv8_pmuv3_event_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-9");

static struct attribute *armv8_pmuv3_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
        .name = "format",
        .attrs = armv8_pmuv3_format_attrs,
};

static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
        &armv8_pmuv3_events_attr_group,
        &armv8_pmuv3_format_attr_group,
        NULL,
};
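
/*
 * For the PMUs that install these groups, the perf core exposes them under
 * /sys/bus/event_source/devices/<pmu>/{events,format}/, letting userspace
 * name events symbolically rather than by raw number, e.g. (illustrative):
 *
 *      perf stat -e armv8_cortex_a57/l1d_cache_refill/ -- <cmd>
 */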

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS 32
#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
        (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
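
/*
 * For example, perf idx 1 (ARMV8_IDX_COUNTER0) maps to hardware event
 * counter 0. The cycle counter (perf idx 0) never takes this path; it is
 * accessed directly via PMCCNTR_EL0.
 */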

/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK 0x1f
#define ARMV8_PMCR_MASK 0x7f /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1 (1 << 31)
#define ARMV8_EXCLUDE_EL0 (1 << 30)
#define ARMV8_INCLUDE_EL2 (1 << 27)
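
/*
 * These filter bits live in the upper part of the event type register:
 * P (bit 31) excludes EL1, U (bit 30) excludes EL0, and NSH (bit 27)
 * enables counting at EL2. armv8pmu_set_event_filter() below ORs them
 * into the event's config_base.
 */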

static inline u32 armv8pmu_pmcr_read(void)
{
        u32 val;

        asm volatile("mrs %0, pmcr_el0" : "=r" (val));
        return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
        val &= ARMV8_PMCR_MASK;
        isb();
        asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
        return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
        return idx >= ARMV8_IDX_CYCLE_COUNTER &&
               idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
        return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
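
/*
 * PMSELR_EL0 selects which event counter the indirect PMXEVCNTR_EL0 and
 * PMXEVTYPER_EL0 accessors operate on; the isb() ensures the new selection
 * takes effect before any subsequent counter access.
 */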
static inline int armv8pmu_select_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        asm volatile("msr pmselr_el0, %0" :: "r" (counter));
        isb();
        return idx;
}

static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 value = 0;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u reading wrong counter %d\n",
                       smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
        else if (armv8pmu_select_counter(idx) == idx)
                asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

        return value;
}

static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u writing wrong counter %d\n",
                       smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
                /*
                 * Set the upper 32bits as this is a 64bit counter but we only
                 * count using the lower 32bits and we want an interrupt when
                 * it overflows.
                 */
                u64 value64 = 0xffffffff00000000ULL | value;

                asm volatile("msr pmccntr_el0, %0" :: "r" (value64));
        } else if (armv8pmu_select_counter(idx) == idx)
                asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
        if (armv8pmu_select_counter(idx) == idx) {
                val &= ARMV8_EVTYPE_MASK;
                asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
        }
}

static inline int armv8pmu_enable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);

        asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
        isb();
        return idx;
}
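
/*
 * PMOVSCLR_EL0 reads back the currently pending overflow flags, and writing
 * a value clears exactly the flags that are set in it, so an overflow
 * arriving between the read and the write below is never lost.
 */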
static inline u32 armv8pmu_getreset_flags(void)
{
        u32 value;

        /* Read */
        asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

        /* Write to clear flags */
        value &= ARMV8_OVSR_MASK;
        asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

        return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters).
         */
        armv8pmu_write_evtype(idx, hwc->config_base);

        /*
         * Enable interrupt for this counter
         */
        armv8pmu_enable_intens(idx);

        /*
         * Enable counter
         */
        armv8pmu_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Disable counter and interrupt
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Disable interrupt for this counter
         */
        armv8pmu_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
        u32 pmovsr;
        struct perf_sample_data data;
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmovsr = armv8pmu_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv8pmu_has_overflowed(pmovsr))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct perf_event *event)
{
        int idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;

        /* Always place a cycle counter into the cycle counter. */
        if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
                if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV8_IDX_CYCLE_COUNTER;
        }

        /*
         * For anything other than a cycle counter, try to use
         * the event counters.
         */
        for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}

/*
 * Add an event filter to a given event.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle)
                return -EPERM;
        if (is_kernel_in_hyp_mode() &&
            attr->exclude_kernel != attr->exclude_hv)
                return -EINVAL;
        if (attr->exclude_user)
                config_base |= ARMV8_EXCLUDE_EL0;
        if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
                config_base |= ARMV8_EXCLUDE_EL1;
        if (!attr->exclude_hv)
                config_base |= ARMV8_INCLUDE_EL2;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}
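
/*
 * When the kernel itself runs at EL2 (VHE), "kernel" and "hyp" denote the
 * same exception level, which is why contradictory exclude_kernel and
 * exclude_hv settings are rejected with -EINVAL above rather than being
 * silently honoured.
 */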

static void armv8pmu_reset(void *info)
{
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
        u32 idx, nb_cnt = cpu_pmu->num_events;

        /* The counter and interrupt enable registers are unknown at reset. */
        for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv8pmu_disable_counter(idx);
                armv8pmu_disable_intens(idx);
        }

        /*
         * Initialize & Reset PMNC. Request overflow interrupt for
         * 64 bit cycle counter but cheat in armv8pmu_write_counter().
         */
        armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_pmuv3_perf_map,
                                &armv8_pmuv3_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static int armv8_a53_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_a53_perf_map,
                                &armv8_a53_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static int armv8_a57_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_a57_perf_map,
                                &armv8_a57_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_thunder_perf_map,
                                &armv8_thunder_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static void armv8pmu_read_num_pmnc_events(void *info)
{
        int *nb_cnt = info;

        /* Read the number of CNTx counters supported from PMNC. */
        *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

        /* Add the CPU cycles counter. */
        *nb_cnt += 1;
}
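
/*
 * For example, a core whose PMCR_EL0.N field reads as 6 ends up with
 * num_events == 7 once the dedicated cycle counter is added above.
 */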

static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
{
        return smp_call_function_any(&arm_pmu->supported_cpus,
                                     armv8pmu_read_num_pmnc_events,
                                     &arm_pmu->num_events, 1);
}

static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq = armv8pmu_handle_irq;
        cpu_pmu->enable = armv8pmu_enable_event;
        cpu_pmu->disable = armv8pmu_disable_event;
        cpu_pmu->read_counter = armv8pmu_read_counter;
        cpu_pmu->write_counter = armv8pmu_write_counter;
        cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
        cpu_pmu->start = armv8pmu_start;
        cpu_pmu->stop = armv8pmu_stop;
        cpu_pmu->reset = armv8pmu_reset;
        cpu_pmu->max_period = (1LLU << 32) - 1;
        cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
}

static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name = "armv8_pmuv3";
        cpu_pmu->map_event = armv8_pmuv3_map_event;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name = "armv8_cortex_a53";
        cpu_pmu->map_event = armv8_a53_map_event;
        cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name = "armv8_cortex_a57";
        cpu_pmu->map_event = armv8_a57_map_event;
        cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name = "armv8_cortex_a72";
        cpu_pmu->map_event = armv8_a57_map_event;
        cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv8_pmu_init(cpu_pmu);
        cpu_pmu->name = "armv8_cavium_thunder";
        cpu_pmu->map_event = armv8_thunder_map_event;
        cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
        {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
        {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
        {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
        {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
        {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
        {},
};
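
/*
 * A matching devicetree node looks roughly like this (illustrative only;
 * the interrupt specifier is board specific):
 *
 *      pmu {
 *              compatible = "arm,cortex-a57-pmu";
 *              interrupts = <...>;
 *      };
 */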

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
        .driver = {
                .name = "armv8-pmu",
                .of_match_table = armv8_pmu_of_device_ids,
        },
        .probe = armv8_pmu_device_probe,
};

static int __init register_armv8_pmu_driver(void)
{
        return platform_driver_register(&armv8_pmu_driver);
}
device_initcall(register_armv8_pmu_driver);