perf_event_v7.c

/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */

#ifdef CONFIG_CPU_V7

#include <asm/cp15.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
	ARMV7_PERFCTR_ITLB_REFILL = 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
	ARMV7_PERFCTR_DTLB_REFILL = 0x05,
	ARMV7_PERFCTR_MEM_READ = 0x06,
	ARMV7_PERFCTR_MEM_WRITE = 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
	ARMV7_PERFCTR_EXC_TAKEN = 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV7_PERFCTR_CID_WRITE = 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE = 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS = 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
	ARMV7_PERFCTR_BUS_ACCESS = 0x19,
	ARMV7_PERFCTR_MEM_ERROR = 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES = 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};

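/*
 * 0xFF is used by this driver as a marker for the dedicated cycle counter:
 * armv7pmu_get_event_idx() below steers events carrying this code to
 * ARMV7_IDX_CYCLE_COUNTER instead of one of the PMNx event counters.
 */
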
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
	ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
	ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
	ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};

/* ARMv7 Cortex-A12 specific event types */
enum armv7_a12_perf_types {
	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
	ARMV7_A12_PERFCTR_PC_WRITE_SPEC = 0x76,
	ARMV7_A12_PERFCTR_PF_TLB_REFILL = 0xe7,
};

/* ARMv7 Krait specific event types */
enum krait_perf_types {
	KRAIT_PMRESR0_GROUP0 = 0xcc,
	KRAIT_PMRESR1_GROUP0 = 0xd0,
	KRAIT_PMRESR2_GROUP0 = 0xd4,
	KRAIT_VPMRESR0_GROUP0 = 0xd8,
	KRAIT_PERFCTR_L1_ICACHE_ACCESS = 0x10011,
	KRAIT_PERFCTR_L1_ICACHE_MISS = 0x10010,
	KRAIT_PERFCTR_L1_ITLB_ACCESS = 0x12222,
	KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210,
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		/*
		 * The prefetch counters don't differentiate between the I
		 * side and the D side.
		 */
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

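/*
 * The Krait values above that don't fit in the 8-bit architected event
 * field (e.g. 0x10011) are not raw PMNx event codes; together with the
 * PMRESR group bases they appear to select the region/group encodings
 * that are decoded by the Krait-specific code later in this file.
 */
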
/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x) \
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

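/*
 * Example: ARMV7_IDX_COUNTER0 (perf index 1) selects hardware event
 * counter 0, index 2 selects counter 1, and so on. The cycle counter
 * (perf index 0) wraps around to 31, the slot the architecture reserves
 * for the cycle counter in the enable, interrupt-enable and overflow
 * registers.
 */
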
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK		0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK	0xc80000ff /* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT	0xff	   /* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1	(1 << 31)
#define ARMV7_EXCLUDE_USER	(1 << 30)
#define ARMV7_INCLUDE_HYP	(1 << 27)

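/*
 * These filter bits share the event-type register with the event number:
 * ARMV7_EVTYPE_MASK above keeps bits 31, 30 and 27 for the filters and
 * bits [7:0] for the event code. A user-space-only event, for example,
 * ends up with ARMV7_EXCLUDE_PL1 set and the event number in the low
 * byte; see armv7pmu_set_event_filter() below.
 */
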
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

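/*
 * Example: for perf index 1 (the first event counter) the test above
 * checks bit 0 of the overflow flags, while the cycle counter (perf
 * index 0) maps to bit 31, matching the layout of the overflow status
 * register.
 */
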
static inline int armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();

	return idx;
}

static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTYPE_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}

static inline int armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();

	return idx;
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
	     cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif

static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the event counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

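/*
 * Worked example: a kernel-only event (attr.exclude_user = 1,
 * attr.exclude_hv = 1) yields config_base = ARMV7_EXCLUDE_USER. The
 * common ARM perf code then ORs the mapped event number into
 * hwc->config_base, and armv7_pmnc_write_evtsel() writes the combined
 * value to the event-type register.
 */
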
static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}


static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static int armv7_a12_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a12_perf_map,
				&armv7_a12_perf_cache_map, 0xFF);
}

static int krait_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map,
				&krait_perf_cache_map, 0xFFFFF);
}

static int krait_map_event_no_branch(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map_no_branch,
				&krait_perf_cache_map, 0xFFFFF);
}

static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq = armv7pmu_handle_irq;
	cpu_pmu->enable = armv7pmu_enable_event;
	cpu_pmu->disable = armv7pmu_disable_event;
	cpu_pmu->read_counter = armv7pmu_read_counter;
	cpu_pmu->write_counter = armv7pmu_write_counter;
	cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
	cpu_pmu->start = armv7pmu_start;
	cpu_pmu->stop = armv7pmu_stop;
	cpu_pmu->reset = armv7pmu_reset;
	cpu_pmu->max_period = (1LLU << 32) - 1;
}

static u32 armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the number of CNTx event counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
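
/*
 * For example, a PMNC.N field of 4 means four CNTx event counters are
 * implemented, so this helper returns 5 once the dedicated cycle counter
 * is counted in.
 */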

static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A8";
	cpu_pmu->map_event = armv7_a8_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A9";
	cpu_pmu->map_event = armv7_a9_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A5";
	cpu_pmu->map_event = armv7_a5_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A15";
	cpu_pmu->map_event = armv7_a15_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A7";
	cpu_pmu->map_event = armv7_a7_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A12";
	cpu_pmu->map_event = armv7_a12_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7_a12_pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Cortex-A17";
	return 0;
}

/*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 * Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
 *
 * A region (R) corresponds to a piece of the CPU (execution unit, instruction
 * unit, etc.) while the event code (CC) corresponds to a particular class of
 * events (interrupts for example). An event code is broken down into
 * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 * example).
 */

#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)
#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN		BIT(31)
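
/*
 * Working the 0xNRCCG encoding above through the example 0x12021: the prefix
 * nibble 1 selects KRAIT_EVENT (a Krait CPU event), the region nibble R = 2
 * selects PMRESR2, the code byte CC = 0x02 is the event class, and the group
 * nibble G = 1 picks byte 1 of that region register. These are the same
 * (config_base >> 12) & 0xf, (config_base >> 4) & 0xff and
 * (config_base >> 0) & 0xf extractions performed in krait_evt_setup() and
 * krait_pmu_get_event_idx() below.
 */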

static u32 krait_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}

	return val;
}

static void krait_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}
}

static u32 krait_read_vpmresr0(void)
{
	u32 val;

	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
	return val;
}

static void krait_write_vpmresr0(u32 val)
{
	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}
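
/*
 * VPMRESR0 is reached through the VFP/Neon coprocessor (the mrc/mcr p10
 * accesses above), so the helpers below temporarily grant CP10/CP11 access
 * via CPACR and set FPEXC_EN before touching it, then restore the original
 * values afterwards. Both run with preemption disabled so the save/restore
 * pair stays on one CPU.
 */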

static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
{
	u32 venum_new_val;
	u32 fp_new_val;

	BUG_ON(preemptible());
	/* CPACR Enable CP10 and CP11 access */
	*venum_orig_val = get_copro_access();
	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
	set_copro_access(venum_new_val);

	/* Enable FPEXC */
	*fp_orig_val = fmrx(FPEXC);
	fp_new_val = *fp_orig_val | FPEXC_EN;
	fmxr(FPEXC, fp_new_val);
}

static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
{
	BUG_ON(preemptible());
	/* Restore FPEXC */
	fmxr(FPEXC, fp_orig_val);
	isb();
	/* Restore CPACR */
	set_copro_access(venum_orig_val);
}

static u32 krait_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
					     KRAIT_PMRESR1_GROUP0,
					     KRAIT_PMRESR2_GROUP0 };
	return pmresrn_table[region];
}

static void krait_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region;
	unsigned int group;
	unsigned int code;
	unsigned int group_shift;
	bool venum_event;

	venum_event = !!(config_base & VENUM_EVENT);
	region = (config_base >> 12) & 0xf;
	code = (config_base >> 4) & 0xff;
	group = (config_base >> 0) & 0xf;

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = KRAIT_VPMRESR0_GROUP0;
	else
		val = krait_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));

	if (venum_event) {
		krait_pre_vpmresr0(&vval, &fval);
		val = krait_read_vpmresr0();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_vpmresr0(val);
		krait_post_vpmresr0(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_pmresrn(region, val);
	}
}
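
/*
 * For the 0x12021 example event, the sequence above programs the counter's
 * event-type register with KRAIT_PMRESR2_GROUP0 + 1 (plus any mode-exclusion
 * bits) and then, in PMRESR2, replaces byte 1 with the code 0x02 while
 * setting the PMRESRn_EN bit.
 */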

static u32 krait_clear_pmresrn_group(u32 val, int group)
{
	u32 mask;
	int group_shift;

	group_shift = group * 8;
	mask = 0xff << group_shift;
	val &= ~mask;

	/* Keep the enable bit set while other groups in the region are in use */
	if (val & ~PMRESRn_EN)
		return val | PMRESRn_EN;

	return 0;
}

static void krait_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region;
	unsigned int group;
	bool venum_event;

	venum_event = !!(config_base & VENUM_EVENT);
	region = (config_base >> 12) & 0xf;
	group = (config_base >> 0) & 0xf;

	if (venum_event) {
		krait_pre_vpmresr0(&vval, &fval);
		val = krait_read_vpmresr0();
		val = krait_clear_pmresrn_group(val, group);
		krait_write_vpmresr0(val);
		krait_post_vpmresr0(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val = krait_clear_pmresrn_group(val, group);
		krait_write_pmresrn(region, val);
	}
}

static void krait_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We set the event for the cycle counter because we
	 * have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_evt_setup(idx, hwc->config_base);
	else
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_reset(void *info)
{
	u32 vval, fval;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	krait_write_pmresrn(0, 0);
	krait_write_pmresrn(1, 0);
	krait_write_pmresrn(2, 0);

	krait_pre_vpmresr0(&vval, &fval);
	krait_write_vpmresr0(0);
	krait_post_vpmresr0(vval, fval);
}

static int krait_event_to_bit(struct perf_event *event, unsigned int region,
			      unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = KRAIT_VPMRESR0_GROUP0;
	else
		bit = krait_get_pmresrn_event(region);
	bit -= krait_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}
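
/*
 * In other words, used_mask doubles as the column-exclusion bitmap: bits up
 * to ARMV7_IDX_COUNTER_LAST() track the hardware counters themselves, and
 * each higher bit stands for one (region, group) slot. For the 0x12021
 * example this works out to ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1 +
 * (KRAIT_PMRESR2_GROUP0 - KRAIT_PMRESR0_GROUP0) + 1.
 */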

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a PMRESR register.
 */
static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	int idx;
	int bit = -1;
	unsigned int prefix;
	unsigned int region;
	unsigned int code;
	unsigned int group;
	bool krait_event;
	struct hw_perf_event *hwc = &event->hw;

	region = (hwc->config_base >> 12) & 0xf;
	code = (hwc->config_base >> 4) & 0xff;
	group = (hwc->config_base >> 0) & 0xf;
	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);

	if (krait_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 2)
			return -EINVAL;
		prefix = hwc->config_base & KRAIT_EVENT_MASK;
		if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
			return -EINVAL;
		if (prefix == VENUM_EVENT && (code & 0xe0))
			return -EINVAL;

		bit = krait_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}

static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region;
	unsigned int group;
	bool krait_event;

	region = (hwc->config_base >> 12) & 0xf;
	group = (hwc->config_base >> 0) & 0xf;
	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);

	if (krait_event) {
		bit = krait_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}

static int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "ARMv7 Krait";
	/* Some early versions of Krait don't support PC write events */
	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
				  "qcom,no-pc-write"))
		cpu_pmu->map_event = krait_map_event_no_branch;
	else
		cpu_pmu->map_event = krait_map_event;
	cpu_pmu->num_events = armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->reset = krait_pmu_reset;
	cpu_pmu->enable = krait_pmu_enable_event;
	cpu_pmu->disable = krait_pmu_disable_event;
	cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
	return 0;
}
#else
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif	/* CONFIG_CPU_V7 */