/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */
#ifdef CONFIG_CPU_V7

#include <asm/cp15.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
        ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
        ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
        ARMV7_PERFCTR_ITLB_REFILL = 0x02,
        ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
        ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
        ARMV7_PERFCTR_DTLB_REFILL = 0x05,
        ARMV7_PERFCTR_MEM_READ = 0x06,
        ARMV7_PERFCTR_MEM_WRITE = 0x07,
        ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
        ARMV7_PERFCTR_EXC_TAKEN = 0x09,
        ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
        ARMV7_PERFCTR_CID_WRITE = 0x0B,

        /*
         * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
         * It counts:
         *  - all (taken) branch instructions,
         *  - instructions that explicitly write the PC,
         *  - exception generating instructions.
         */
        ARMV7_PERFCTR_PC_WRITE = 0x0C,
        ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
        ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
        ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
        ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
        ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
        ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,

        /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
        ARMV7_PERFCTR_MEM_ACCESS = 0x13,
        ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
        ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
        ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
        ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
        ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
        ARMV7_PERFCTR_BUS_ACCESS = 0x19,
        ARMV7_PERFCTR_MEM_ERROR = 0x1A,
        ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
        ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
        ARMV7_PERFCTR_BUS_CYCLES = 0x1D,

        ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
        ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
        ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
        ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
        ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
        ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
        ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
        ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
        ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
        ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
        ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
        ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
        ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
        ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,

        ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
        ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,

        ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
        ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
        ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
        ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,

        ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};

/* ARMv7 Cortex-A12 specific event types */
enum armv7_a12_perf_types {
        ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
        ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,

        ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
        ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,

        ARMV7_A12_PERFCTR_PC_WRITE_SPEC = 0x76,

        ARMV7_A12_PERFCTR_PF_TLB_REFILL = 0xe7,
};

/* ARMv7 Krait specific event types */
enum krait_perf_types {
        KRAIT_PMRESR0_GROUP0 = 0xcc,
        KRAIT_PMRESR1_GROUP0 = 0xd0,
        KRAIT_PMRESR2_GROUP0 = 0xd4,
        KRAIT_VPMRESR0_GROUP0 = 0xd8,

        KRAIT_PERFCTR_L1_ICACHE_ACCESS = 0x10011,
        KRAIT_PERFCTR_L1_ICACHE_MISS = 0x10010,

        KRAIT_PERFCTR_L1_ITLB_ACCESS = 0x12222,
        KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210,
};

/* ARMv7 Scorpion specific event types */
enum scorpion_perf_types {
        SCORPION_LPM0_GROUP0 = 0x4c,
        SCORPION_LPM1_GROUP0 = 0x50,
        SCORPION_LPM2_GROUP0 = 0x54,
        SCORPION_L2LPM_GROUP0 = 0x58,
        SCORPION_VLPM_GROUP0 = 0x5c,

        SCORPION_ICACHE_ACCESS = 0x10053,
        SCORPION_ICACHE_MISS = 0x10052,

        SCORPION_DTLB_ACCESS = 0x12013,
        SCORPION_DTLB_MISS = 0x12012,

        SCORPION_ITLB_MISS = 0x12021,
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
        [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
        /*
         * The prefetch counters don't differentiate between the I side and the
         * D side.
         */
        [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
        [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
        /*
         * Not all performance counters differentiate between read and write
         * accesses/misses so we're not always strictly correct, but it's the
         * best we can do. Writes and reads get combined in these cases.
         */
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
        [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
        [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        /*
         * Not all performance counters differentiate between read and write
         * accesses/misses so we're not always strictly correct, but it's the
         * best we can do. Writes and reads get combined in these cases.
         */
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
        [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS,
        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
        [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Scorpion HW events mapping
 */
static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
        /*
         * Only ITLB misses and DTLB refills are supported. If users want
         * the DTLB refill misses, a raw counter must be used.
         */
        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER 0
#define ARMV7_IDX_COUNTER0      1
#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS      32
#define ARMV7_COUNTER_MASK      (ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x) \
        (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
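
/*
 * Worked example (illustrative only, derived from the macros above, not
 * part of the original sources): perf index 0 (the cycle counter) wraps
 * around to hardware counter 31,
 *
 *   ARMV7_IDX_TO_COUNTER(ARMV7_IDX_CYCLE_COUNTER) == 31
 *   ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0)      == 0
 *
 * which lines up with the PMU register layout where bit 31 of the
 * enable/overflow registers controls the cycle counter and bits 0..N-1
 * control the event counters.
 */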
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E            (1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P            (1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C            (1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D            (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X            (1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP           (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT      11       /* Number of counters supported */
#define ARMV7_PMNC_N_MASK       0x1f
#define ARMV7_PMNC_MASK         0x3f     /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK         0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK   ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK       0xc80000ff /* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT      0xff       /* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1       (1 << 31)
#define ARMV7_EXCLUDE_USER      (1 << 30)
#define ARMV7_INCLUDE_HYP       (1 << 27)
static inline u32 armv7_pmnc_read(void)
{
        u32 val;
        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
        return val;
}

static inline void armv7_pmnc_write(u32 val)
{
        val &= ARMV7_PMNC_MASK;
        isb();
        asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
        return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
        return idx >= ARMV7_IDX_CYCLE_COUNTER &&
                idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
        return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

static inline void armv7_pmnc_select_counter(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
        isb();
}

static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 value = 0;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u reading wrong counter %d\n",
                        smp_processor_id(), idx);
        } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
                asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
        } else {
                armv7_pmnc_select_counter(idx);
                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
        }

        return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
                asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
        } else {
                armv7_pmnc_select_counter(idx);
                asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
        }
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
        armv7_pmnc_select_counter(idx);
        val &= ARMV7_EVTYPE_MASK;
        asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}

static inline void armv7_pmnc_enable_counter(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_counter(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_enable_intens(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_intens(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
        isb();
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
        u32 val;

        /* Read */
        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

        /* Write to clear flags */
        val &= ARMV7_FLAG_MASK;
        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

        return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
        u32 val;
        unsigned int cnt;

        pr_info("PMNC registers dump:\n");
        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
        pr_info("PMNC  =0x%08x\n", val);
        asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
        pr_info("CNTENS=0x%08x\n", val);
        asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
        pr_info("INTENS=0x%08x\n", val);
        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
        pr_info("FLAGS =0x%08x\n", val);
        asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
        pr_info("SELECT=0x%08x\n", val);
        asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
        pr_info("CCNT  =0x%08x\n", val);

        for (cnt = ARMV7_IDX_COUNTER0;
                        cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
                armv7_pmnc_select_counter(cnt);
                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
                pr_info("CNT[%d] count =0x%08x\n",
                        ARMV7_IDX_TO_COUNTER(cnt), val);
                asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
                pr_info("CNT[%d] evtsel=0x%08x\n",
                        ARMV7_IDX_TO_COUNTER(cnt), val);
        }
}
#endif
static void armv7pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
                        smp_processor_id(), idx);
                return;
        }

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv7_pmnc_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters)
         * We only need to set the event for the cycle counter if we
         * have the ability to perform event filtering.
         */
        if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
                armv7_pmnc_write_evtsel(idx, hwc->config_base);

        /*
         * Enable interrupt for this counter
         */
        armv7_pmnc_enable_intens(idx);

        /*
         * Enable counter
         */
        armv7_pmnc_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
                        smp_processor_id(), idx);
                return;
        }

        /*
         * Disable counter and interrupt
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv7_pmnc_disable_counter(idx);

        /*
         * Disable interrupt for this counter
         */
        armv7_pmnc_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
        u32 pmnc;
        struct perf_sample_data data;
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmnc = armv7_pmnc_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv7_pmnc_has_overflowed(pmnc))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
        armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
        armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct perf_event *event)
{
        int idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

        /* Always place a cycle counter into the cycle counter. */
        if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
                if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV7_IDX_CYCLE_COUNTER;
        }

        /*
         * For anything other than a cycle counter, try and use
         * the events counters
         */
        for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}
/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle)
                return -EPERM;
        if (attr->exclude_user)
                config_base |= ARMV7_EXCLUDE_USER;
        if (attr->exclude_kernel)
                config_base |= ARMV7_EXCLUDE_PL1;
        if (!attr->exclude_hv)
                config_base |= ARMV7_INCLUDE_HYP;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}
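
/*
 * Illustrative example (not from the original sources): a perf_event_attr
 * with exclude_user = 1 and exclude_kernel = exclude_hv = 0 yields
 *
 *   config_base = ARMV7_EXCLUDE_USER | ARMV7_INCLUDE_HYP
 *
 * Together with the event number, these bits reach PMXEVTYPER when the
 * counter is programmed via armv7_pmnc_write_evtsel(), so PL0 (user)
 * counting is excluded while PL1 and Hyp mode counting remain enabled.
 */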
static void armv7pmu_reset(void *info)
{
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
        u32 idx, nb_cnt = cpu_pmu->num_events;

        /* The counter and interrupt enable registers are unknown at reset. */
        for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv7_pmnc_disable_counter(idx);
                armv7_pmnc_disable_intens(idx);
        }

        /* Initialize & Reset PMNC: C and P bits */
        armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static int armv7_a8_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a8_perf_map,
                                &armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a9_perf_map,
                                &armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a5_perf_map,
                                &armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a15_perf_map,
                                &armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a7_perf_map,
                                &armv7_a7_perf_cache_map, 0xFF);
}

static int armv7_a12_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a12_perf_map,
                                &armv7_a12_perf_cache_map, 0xFF);
}

static int krait_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &krait_perf_map,
                                &krait_perf_cache_map, 0xFFFFF);
}

static int krait_map_event_no_branch(struct perf_event *event)
{
        return armpmu_map_event(event, &krait_perf_map_no_branch,
                                &krait_perf_cache_map, 0xFFFFF);
}

static int scorpion_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &scorpion_perf_map,
                                &scorpion_perf_cache_map, 0xFFFFF);
}
static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq = armv7pmu_handle_irq;
        cpu_pmu->enable = armv7pmu_enable_event;
        cpu_pmu->disable = armv7pmu_disable_event;
        cpu_pmu->read_counter = armv7pmu_read_counter;
        cpu_pmu->write_counter = armv7pmu_write_counter;
        cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
        cpu_pmu->start = armv7pmu_start;
        cpu_pmu->stop = armv7pmu_stop;
        cpu_pmu->reset = armv7pmu_reset;
        cpu_pmu->max_period = (1LLU << 32) - 1;
}
static u32 armv7_read_num_pmnc_events(void)
{
        u32 nb_cnt;

        /* Read the nb of CNTx counters supported from PMNC */
        nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

        /* Add the CPU cycles counter and return */
        return nb_cnt + 1;
}
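
/*
 * For example (hypothetical value, for illustration only): on a core whose
 * PMNC reads back with N = 6 in bits [15:11], there are six event counters,
 * so this returns 7 once the fixed cycle counter is included.
 */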
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a8";
        cpu_pmu->map_event = armv7_a8_map_event;
        cpu_pmu->num_events = armv7_read_num_pmnc_events();
        return 0;
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a9";
        cpu_pmu->map_event = armv7_a9_map_event;
        cpu_pmu->num_events = armv7_read_num_pmnc_events();
        return 0;
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a5";
        cpu_pmu->map_event = armv7_a5_map_event;
        cpu_pmu->num_events = armv7_read_num_pmnc_events();
        return 0;
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a15";
        cpu_pmu->map_event = armv7_a15_map_event;
        cpu_pmu->num_events = armv7_read_num_pmnc_events();
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        return 0;
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a7";
        cpu_pmu->map_event = armv7_a7_map_event;
        cpu_pmu->num_events = armv7_read_num_pmnc_events();
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        return 0;
}

static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a12";
        cpu_pmu->map_event = armv7_a12_map_event;
        cpu_pmu->num_events = armv7_read_num_pmnc_events();
        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
        return 0;
}

static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv7_a12_pmu_init(cpu_pmu);
        cpu_pmu->name = "armv7_cortex_a17";
        return 0;
}
/*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */
#define KRAIT_EVENT             (1 << 16)
#define VENUM_EVENT             (2 << 16)
#define KRAIT_EVENT_MASK        (KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN              BIT(31)

#define EVENT_REGION(event)     (((event) >> 12) & 0xf)   /* R */
#define EVENT_GROUP(event)      ((event) & 0xf)           /* G */
#define EVENT_CODE(event)       (((event) >> 4) & 0xff)   /* CC */
#define EVENT_VENUM(event)      (!!(event & VENUM_EVENT)) /* N=2 */
#define EVENT_CPU(event)        (!!(event & KRAIT_EVENT)) /* N=1 */
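
/*
 * Worked decode of the 0x12021 example from the comment above, using the
 * macros just defined (added here purely for illustration):
 *
 *   EVENT_CPU(0x12021)    == 1      N = 1  -> Krait CPU (PMRESRn) event
 *   EVENT_REGION(0x12021) == 2      R = 2  -> PMRESR2
 *   EVENT_CODE(0x12021)   == 0x02   CC     -> event class/code 2
 *   EVENT_GROUP(0x12021)  == 1      G = 1  -> group 1 within that class
 */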
static u32 krait_read_pmresrn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
                break;
        case 1:
                asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
                break;
        case 2:
                asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
                break;
        default:
                BUG(); /* Should be validated in krait_pmu_get_event_idx() */
        }

        return val;
}

static void krait_write_pmresrn(int n, u32 val)
{
        switch (n) {
        case 0:
                asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
                break;
        case 1:
                asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
                break;
        case 2:
                asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
                break;
        default:
                BUG(); /* Should be validated in krait_pmu_get_event_idx() */
        }
}

static u32 venum_read_pmresr(void)
{
        u32 val;
        asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
        return val;
}

static void venum_write_pmresr(u32 val)
{
        asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}
static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
{
        u32 venum_new_val;
        u32 fp_new_val;

        BUG_ON(preemptible());
        /* CPACR Enable CP10 and CP11 access */
        *venum_orig_val = get_copro_access();
        venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
        set_copro_access(venum_new_val);

        /* Enable FPEXC */
        *fp_orig_val = fmrx(FPEXC);
        fp_new_val = *fp_orig_val | FPEXC_EN;
        fmxr(FPEXC, fp_new_val);
}

static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
{
        BUG_ON(preemptible());
        /* Restore FPEXC */
        fmxr(FPEXC, fp_orig_val);
        isb();
        /* Restore CPACR */
        set_copro_access(venum_orig_val);
}

static u32 krait_get_pmresrn_event(unsigned int region)
{
        static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
                                             KRAIT_PMRESR1_GROUP0,
                                             KRAIT_PMRESR2_GROUP0 };
        return pmresrn_table[region];
}
static void krait_evt_setup(int idx, u32 config_base)
{
        u32 val;
        u32 mask;
        u32 vval, fval;
        unsigned int region = EVENT_REGION(config_base);
        unsigned int group = EVENT_GROUP(config_base);
        unsigned int code = EVENT_CODE(config_base);
        unsigned int group_shift;
        bool venum_event = EVENT_VENUM(config_base);

        group_shift = group * 8;
        mask = 0xff << group_shift;

        /* Configure evtsel for the region and group */
        if (venum_event)
                val = KRAIT_VPMRESR0_GROUP0;
        else
                val = krait_get_pmresrn_event(region);
        val += group;
        /* Mix in mode-exclusion bits */
        val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
        armv7_pmnc_write_evtsel(idx, val);

        if (venum_event) {
                venum_pre_pmresr(&vval, &fval);
                val = venum_read_pmresr();
                val &= ~mask;
                val |= code << group_shift;
                val |= PMRESRn_EN;
                venum_write_pmresr(val);
                venum_post_pmresr(vval, fval);
        } else {
                val = krait_read_pmresrn(region);
                val &= ~mask;
                val |= code << group_shift;
                val |= PMRESRn_EN;
                krait_write_pmresrn(region, val);
        }
}
static u32 clear_pmresrn_group(u32 val, int group)
{
        u32 mask;
        int group_shift;

        group_shift = group * 8;
        mask = 0xff << group_shift;
        val &= ~mask;

        /* Don't clear enable bit if entire region isn't disabled */
        if (val & ~PMRESRn_EN)
                return val |= PMRESRn_EN;

        return 0;
}

static void krait_clearpmu(u32 config_base)
{
        u32 val;
        u32 vval, fval;
        unsigned int region = EVENT_REGION(config_base);
        unsigned int group = EVENT_GROUP(config_base);
        bool venum_event = EVENT_VENUM(config_base);

        if (venum_event) {
                venum_pre_pmresr(&vval, &fval);
                val = venum_read_pmresr();
                val = clear_pmresrn_group(val, group);
                venum_write_pmresr(val);
                venum_post_pmresr(vval, fval);
        } else {
                val = krait_read_pmresrn(region);
                val = clear_pmresrn_group(val, group);
                krait_write_pmresrn(region, val);
        }
}
static void krait_pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        /* Disable counter and interrupt */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /* Disable counter */
        armv7_pmnc_disable_counter(idx);

        /*
         * Clear pmresr code (if destined for PMNx counters)
         */
        if (hwc->config_base & KRAIT_EVENT_MASK)
                krait_clearpmu(hwc->config_base);

        /* Disable interrupt for this counter */
        armv7_pmnc_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /* Disable counter */
        armv7_pmnc_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters)
         * We set the event for the cycle counter because we
         * have the ability to perform event filtering.
         */
        if (hwc->config_base & KRAIT_EVENT_MASK)
                krait_evt_setup(idx, hwc->config_base);
        else
                armv7_pmnc_write_evtsel(idx, hwc->config_base);

        /* Enable interrupt for this counter */
        armv7_pmnc_enable_intens(idx);

        /* Enable counter */
        armv7_pmnc_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
  1187. static void krait_pmu_reset(void *info)
  1188. {
  1189. u32 vval, fval;
  1190. struct arm_pmu *cpu_pmu = info;
  1191. u32 idx, nb_cnt = cpu_pmu->num_events;
  1192. armv7pmu_reset(info);
  1193. /* Clear all pmresrs */
  1194. krait_write_pmresrn(0, 0);
  1195. krait_write_pmresrn(1, 0);
  1196. krait_write_pmresrn(2, 0);
  1197. venum_pre_pmresr(&vval, &fval);
  1198. venum_write_pmresr(0);
  1199. venum_post_pmresr(vval, fval);
  1200. /* Reset PMxEVNCTCR to sane default */
  1201. for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
  1202. armv7_pmnc_select_counter(idx);
  1203. asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
  1204. }
  1205. }
  1206. static int krait_event_to_bit(struct perf_event *event, unsigned int region,
  1207. unsigned int group)
  1208. {
  1209. int bit;
  1210. struct hw_perf_event *hwc = &event->hw;
  1211. struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
  1212. if (hwc->config_base & VENUM_EVENT)
  1213. bit = KRAIT_VPMRESR0_GROUP0;
  1214. else
  1215. bit = krait_get_pmresrn_event(region);
  1216. bit -= krait_get_pmresrn_event(0);
  1217. bit += group;
  1218. /*
  1219. * Lower bits are reserved for use by the counters (see
  1220. * armv7pmu_get_event_idx() for more info)
  1221. */
  1222. bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
  1223. return bit;
  1224. }

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	int idx;
	int bit = -1;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int code = EVENT_CODE(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool krait_event = EVENT_CPU(hwc->config_base);

	if (venum_event || krait_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 2)
			return -EINVAL;
		if (venum_event && (code & 0xe0))
			return -EINVAL;

		bit = krait_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}
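
/*
 * Example of the column exclusion rule above (config values are illustrative,
 * using the 0xNRCCG encoding documented earlier in this file): 0x10011 and
 * 0x10031 are both Krait CPU events in region 0, group 1, so
 * krait_event_to_bit() maps them to the same used_mask bit and the second
 * request fails with -EAGAIN. Their event codes differ (0x01 vs 0x03), but a
 * pmresr group byte can only hold one code at a time, which is exactly the
 * constraint that bit models.
 */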

static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool krait_event = EVENT_CPU(hwc->config_base);

	if (venum_event || krait_event) {
		bit = krait_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}

static int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_krait";
	/* Some early versions of Krait don't support PC write events */
	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
				  "qcom,no-pc-write"))
		cpu_pmu->map_event = krait_map_event_no_branch;
	else
		cpu_pmu->map_event = krait_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->reset = krait_pmu_reset;
	cpu_pmu->enable = krait_pmu_enable_event;
	cpu_pmu->disable = krait_pmu_disable_event;
	cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
	return 0;
}

/*
 * Scorpion Local Performance Monitor Register (LPMn)
 *
 *            31   30     24   16     8      0
 *            +--------------------------------+
 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
 *            +--------------------------------+
 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */
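
/*
 * Worked decode of the example above, assuming the EVENT_* helpers defined
 * for the Krait code earlier in this file: for hwc->config_base = 0x12021,
 *
 *   N  -> 1    (CPU event: EVENT_CPU() is true, EVENT_VENUM() is false)
 *   R  -> 2    (EVENT_REGION() selects LPM2)
 *   CC -> 0x02 (EVENT_CODE(), the class programmed into the LPM2 group byte)
 *   G  -> 1    (EVENT_GROUP(), the group byte and the evtsel offset)
 */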

static u32 scorpion_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
		break;
	case 3:
		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
	}

	return val;
}

static void scorpion_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
		break;
	case 3:
		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
	}
}

static u32 scorpion_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
					     SCORPION_LPM1_GROUP0,
					     SCORPION_LPM2_GROUP0,
					     SCORPION_L2LPM_GROUP0 };
	return pmresrn_table[region];
}

static void scorpion_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = SCORPION_VLPM_GROUP0;
	else
		val = scorpion_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = scorpion_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		scorpion_write_pmresrn(region, val);
	}
}

static void scorpion_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	bool venum_event = EVENT_VENUM(config_base);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val = clear_pmresrn_group(val, group);
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = scorpion_read_pmresrn(region);
		val = clear_pmresrn_group(val, group);
		scorpion_write_pmresrn(region, val);
	}
}

static void scorpion_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't set the event for the cycle counter because we
	 * don't have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_evt_setup(idx, hwc->config_base);
	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_reset(void *info)
{
	u32 vval, fval;
	struct arm_pmu *cpu_pmu = info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	scorpion_write_pmresrn(0, 0);
	scorpion_write_pmresrn(1, 0);
	scorpion_write_pmresrn(2, 0);
	scorpion_write_pmresrn(3, 0);

	venum_pre_pmresr(&vval, &fval);
	venum_write_pmresr(0);
	venum_post_pmresr(vval, fval);

	/* Reset PMxEVNCTCR to sane default */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
	}
}

static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
				 unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = SCORPION_VLPM_GROUP0;
	else
		bit = scorpion_get_pmresrn_event(region);
	bit -= scorpion_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int idx;
	int bit = -1;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool scorpion_event = EVENT_CPU(hwc->config_base);

	if (venum_event || scorpion_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 3)
			return -EINVAL;

		bit = scorpion_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}
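
/*
 * Venum note (example configs are illustrative, not taken from this file):
 * there is only one VLPM register, so scorpion_event_to_bit() starts from
 * SCORPION_VLPM_GROUP0 regardless of the region field. Two Venum events that
 * share a group, e.g. 0x20011 and 0x21021 (both group 1), therefore map to
 * the same used_mask bit and the second is rejected with -EAGAIN even though
 * their region fields differ.
 */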

static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
					 struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool scorpion_event = EVENT_CPU(hwc->config_base);

	if (venum_event || scorpion_event) {
		bit = scorpion_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}

static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_scorpion";
	cpu_pmu->map_event = scorpion_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	cpu_pmu->reset = scorpion_pmu_reset;
	cpu_pmu->enable = scorpion_pmu_enable_event;
	cpu_pmu->disable = scorpion_pmu_disable_event;
	cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
	return 0;
}

static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_scorpion_mp";
	cpu_pmu->map_event = scorpion_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	cpu_pmu->reset = scorpion_pmu_reset;
	cpu_pmu->enable = scorpion_pmu_enable_event;
	cpu_pmu->disable = scorpion_pmu_disable_event;
	cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
	return 0;
}

#else
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif /* CONFIG_CPU_V7 */