- /*
- * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
- *
- * ARMv7 support: Jean Pihet <jpihet@mvista.com>
- * 2010 (c) MontaVista Software, LLC.
- *
- * Copied from ARMv6 code, with the low level code inspired
- * by the ARMv7 Oprofile code.
- *
- * Cortex-A8 has up to 4 configurable performance counters and
- * a single cycle counter.
- * Cortex-A9 has up to 31 configurable performance counters and
- * a single cycle counter.
- *
- * All counters can be enabled/disabled and IRQ masked separately. The cycle
- * counter and all 4 performance counters together can be reset separately.
- */
- #ifdef CONFIG_CPU_V7
- #include <asm/cp15.h>
- #include <asm/vfp.h>
- #include "../vfp/vfpinstr.h"
- /*
- * Common ARMv7 event types
- *
- * Note: An implementation may not be able to count all of these events
- * but the encodings are considered to be `reserved' in the case that
- * they are not available.
- */
- enum armv7_perf_types {
- ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
- ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
- ARMV7_PERFCTR_ITLB_REFILL = 0x02,
- ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
- ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
- ARMV7_PERFCTR_DTLB_REFILL = 0x05,
- ARMV7_PERFCTR_MEM_READ = 0x06,
- ARMV7_PERFCTR_MEM_WRITE = 0x07,
- ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
- ARMV7_PERFCTR_EXC_TAKEN = 0x09,
- ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
- ARMV7_PERFCTR_CID_WRITE = 0x0B,
- /*
- * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
- * It counts:
- * - all (taken) branch instructions,
- * - instructions that explicitly write the PC,
- * - exception generating instructions.
- */
- ARMV7_PERFCTR_PC_WRITE = 0x0C,
- ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
- ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
- ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
- ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
- ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
- ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
- /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
- ARMV7_PERFCTR_MEM_ACCESS = 0x13,
- ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
- ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
- ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
- ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
- ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
- ARMV7_PERFCTR_BUS_ACCESS = 0x19,
- ARMV7_PERFCTR_MEM_ERROR = 0x1A,
- ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
- ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
- ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
- ARMV7_PERFCTR_CPU_CYCLES = 0xFF
- };
- /* ARMv7 Cortex-A8 specific event types */
- enum armv7_a8_perf_types {
- ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
- ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
- ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
- ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
- };
- /* ARMv7 Cortex-A9 specific event types */
- enum armv7_a9_perf_types {
- ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
- ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
- ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
- };
- /* ARMv7 Cortex-A5 specific event types */
- enum armv7_a5_perf_types {
- ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
- ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
- };
- /* ARMv7 Cortex-A15 specific event types */
- enum armv7_a15_perf_types {
- ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
- ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
- ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
- ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
- ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
- ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
- ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
- ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
- ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
- ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
- ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
- };
- /* ARMv7 Cortex-A12 specific event types */
- enum armv7_a12_perf_types {
- ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
- ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
- ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
- ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
- ARMV7_A12_PERFCTR_PC_WRITE_SPEC = 0x76,
- ARMV7_A12_PERFCTR_PF_TLB_REFILL = 0xe7,
- };
- /* ARMv7 Krait specific event types */
- enum krait_perf_types {
- KRAIT_PMRESR0_GROUP0 = 0xcc,
- KRAIT_PMRESR1_GROUP0 = 0xd0,
- KRAIT_PMRESR2_GROUP0 = 0xd4,
- KRAIT_VPMRESR0_GROUP0 = 0xd8,
- KRAIT_PERFCTR_L1_ICACHE_ACCESS = 0x10011,
- KRAIT_PERFCTR_L1_ICACHE_MISS = 0x10010,
- KRAIT_PERFCTR_L1_ITLB_ACCESS = 0x12222,
- KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210,
- };
- /*
- * Cortex-A8 HW events mapping
- *
- * The hardware events that we support. We do support cache operations but
- * we have Harvard caches and no way to combine instruction and data
- * accesses/misses in hardware.
- */
- static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
- };
- static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- [C(L1D)] = {
- /*
- * The performance counters don't differentiate between read
- * and write accesses/misses so this isn't strictly correct,
- * but it's the best we can do. Writes and reads get
- * combined.
- */
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(NODE)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- };
- /*
- * Cortex-A9 HW events mapping
- */
- static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
- };
- static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- [C(L1D)] = {
- /*
- * The performance counters don't differentiate between read
- * and write accesses/misses so this isn't strictly correct,
- * but it's the best we can do. Writes and reads get
- * combined.
- */
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(NODE)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- };
- /*
- * Cortex-A5 HW events mapping
- */
- static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
- };
- static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- [C(L1D)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
- [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
- },
- },
- [C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- /*
- * The prefetch counters don't differentiate between the I
- * side and the D side.
- */
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
- [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
- },
- },
- [C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(NODE)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- };
- /*
- * Cortex-A15 HW events mapping
- */
- static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
- };
- static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- [C(L1D)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
- [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
- [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(L1I)] = {
- /*
- * Not all performance counters differentiate between read
- * and write accesses/misses so we're not always strictly
- * correct, but it's the best we can do. Writes and reads get
- * combined in these cases.
- */
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
- [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
- [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(NODE)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- };
- /*
- * Cortex-A7 HW events mapping
- */
- static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
- };
- static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- [C(L1D)] = {
- /*
- * The performance counters don't differentiate between read
- * and write accesses/misses so this isn't strictly correct,
- * but it's the best we can do. Writes and reads get
- * combined.
- */
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(NODE)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- };
- /*
- * Cortex-A12 HW events mapping
- */
- static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
- };
- static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- [C(L1D)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(L1I)] = {
- /*
- * Not all performance counters differentiate between read
- * and write accesses/misses so we're not always strictly
- * correct, but it's the best we can do. Writes and reads get
- * combined in these cases.
- */
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
- },
- },
- [C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(NODE)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- };
- /*
- * Krait HW events mapping
- */
- static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
- };
- static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
- };
- static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- [C(L1D)] = {
- /*
- * The performance counters don't differentiate between read
- * and write accesses/misses so this isn't strictly correct,
- * but it's the best we can do. Writes and reads get
- * combined.
- */
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(NODE)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- };
- /*
- * Perf Events' indices
- */
- #define ARMV7_IDX_CYCLE_COUNTER 0
- #define ARMV7_IDX_COUNTER0 1
- #define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
- (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
- #define ARMV7_MAX_COUNTERS 32
- #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
- /*
- * ARMv7 low level PMNC access
- */
- /*
- * Perf Event to low level counters mapping
- */
- #define ARMV7_IDX_TO_COUNTER(x) \
- (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
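- /*
- * Illustrative note (editorial sketch, not part of the original file):
- * perf index 0 is the dedicated cycle counter (CCNT) and the event
- * counters start at perf index 1, so the macro above yields
- * ARMV7_IDX_TO_COUNTER(1) == 0 for hardware counter PMN0,
- * ARMV7_IDX_TO_COUNTER(2) == 1 for PMN1, and so on.
- */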
- /*
- * Per-CPU PMNC: config reg
- */
- #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
- #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
- #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
- #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
- #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
- #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
- #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
- #define ARMV7_PMNC_N_MASK 0x1f
- #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
- /*
- * FLAG: counters overflow flag status reg
- */
- #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
- #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
- /*
- * PMXEVTYPER: Event selection reg
- */
- #define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
- #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
- /*
- * Event filters for PMUv2
- */
- #define ARMV7_EXCLUDE_PL1 (1 << 31)
- #define ARMV7_EXCLUDE_USER (1 << 30)
- #define ARMV7_INCLUDE_HYP (1 << 27)
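- /*
- * Illustrative sketch (editorial, not part of the original file): the
- * value programmed into PMXEVTYPER is the event number in its low bits
- * combined with the PMUv2 filter bits above. For instance, counting
- * branch mispredicts while excluding user space could look like:
- */
- #if 0 /* example fragment, never built */
- u32 evtsel = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED | ARMV7_EXCLUDE_USER;
- armv7_pmnc_write_evtsel(idx, evtsel); /* write is masked by ARMV7_EVTYPE_MASK */
- #endif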
- static inline u32 armv7_pmnc_read(void)
- {
- u32 val;
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
- return val;
- }
- static inline void armv7_pmnc_write(u32 val)
- {
- val &= ARMV7_PMNC_MASK;
- isb();
- asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
- }
- static inline int armv7_pmnc_has_overflowed(u32 pmnc)
- {
- return pmnc & ARMV7_OVERFLOWED_MASK;
- }
- static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
- {
- return idx >= ARMV7_IDX_CYCLE_COUNTER &&
- idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
- }
- static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
- {
- return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
- }
- static inline int armv7_pmnc_select_counter(int idx)
- {
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
- isb();
- return idx;
- }
- static inline u32 armv7pmu_read_counter(struct perf_event *event)
- {
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- u32 value = 0;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
- pr_err("CPU%u reading wrong counter %d\n",
- smp_processor_id(), idx);
- else if (idx == ARMV7_IDX_CYCLE_COUNTER)
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
- else if (armv7_pmnc_select_counter(idx) == idx)
- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
- return value;
- }
- static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
- {
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
- pr_err("CPU%u writing wrong counter %d\n",
- smp_processor_id(), idx);
- else if (idx == ARMV7_IDX_CYCLE_COUNTER)
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
- else if (armv7_pmnc_select_counter(idx) == idx)
- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
- }
- static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
- {
- if (armv7_pmnc_select_counter(idx) == idx) {
- val &= ARMV7_EVTYPE_MASK;
- asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
- }
- }
- static inline int armv7_pmnc_enable_counter(int idx)
- {
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
- return idx;
- }
- static inline int armv7_pmnc_disable_counter(int idx)
- {
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
- return idx;
- }
- static inline int armv7_pmnc_enable_intens(int idx)
- {
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
- return idx;
- }
- static inline int armv7_pmnc_disable_intens(int idx)
- {
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
- isb();
- /* Clear the overflow flag in case an interrupt is pending. */
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
- isb();
- return idx;
- }
- static inline u32 armv7_pmnc_getreset_flags(void)
- {
- u32 val;
- /* Read */
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
- /* Write to clear flags */
- val &= ARMV7_FLAG_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
- return val;
- }
- #ifdef DEBUG
- static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
- {
- u32 val;
- unsigned int cnt;
- printk(KERN_INFO "PMNC registers dump:\n");
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- printk(KERN_INFO "PMNC =0x%08x\n", val);
- asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
- printk(KERN_INFO "CNTENS=0x%08x\n", val);
- asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
- printk(KERN_INFO "INTENS=0x%08x\n", val);
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
- printk(KERN_INFO "FLAGS =0x%08x\n", val);
- asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
- printk(KERN_INFO "SELECT=0x%08x\n", val);
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
- printk(KERN_INFO "CCNT =0x%08x\n", val);
- for (cnt = ARMV7_IDX_COUNTER0;
- cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
- armv7_pmnc_select_counter(cnt);
- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
- printk(KERN_INFO "CNT[%d] count =0x%08x\n",
- ARMV7_IDX_TO_COUNTER(cnt), val);
- asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
- printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
- ARMV7_IDX_TO_COUNTER(cnt), val);
- }
- }
- #endif
- static void armv7pmu_enable_event(struct perf_event *event)
- {
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- int idx = hwc->idx;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
- pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
- smp_processor_id(), idx);
- return;
- }
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(idx);
- /*
- * Set event (if destined for PMNx counters)
- * We only need to set the event for the cycle counter if we
- * have the ability to perform event filtering.
- */
- if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
- armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /*
- * Enable interrupt for this counter
- */
- armv7_pmnc_enable_intens(idx);
- /*
- * Enable counter
- */
- armv7_pmnc_enable_counter(idx);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
- }
- static void armv7pmu_disable_event(struct perf_event *event)
- {
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- int idx = hwc->idx;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
- pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
- smp_processor_id(), idx);
- return;
- }
- /*
- * Disable counter and interrupt
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(idx);
- /*
- * Disable interrupt for this counter
- */
- armv7_pmnc_disable_intens(idx);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
- }
- static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
- {
- u32 pmnc;
- struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
- struct pt_regs *regs;
- int idx;
- /*
- * Get and reset the IRQ flags
- */
- pmnc = armv7_pmnc_getreset_flags();
- /*
- * Did an overflow occur?
- */
- if (!armv7_pmnc_has_overflowed(pmnc))
- return IRQ_NONE;
- /*
- * Handle the counter(s) overflow(s)
- */
- regs = get_irq_regs();
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
- struct perf_event *event = cpuc->events[idx];
- struct hw_perf_event *hwc;
- /* Ignore if we don't have an event. */
- if (!event)
- continue;
- /*
- * We have a single interrupt for all counters. Check that
- * each counter has overflowed before we process it.
- */
- if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
- continue;
- hwc = &event->hw;
- armpmu_event_update(event);
- perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event))
- continue;
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
- }
- /*
- * Handle the pending perf events.
- *
- * Note: this call *must* be run with interrupts disabled. For
- * platforms that can have the PMU interrupts raised as an NMI, this
- * will not work.
- */
- irq_work_run();
- return IRQ_HANDLED;
- }
- static void armv7pmu_start(struct arm_pmu *cpu_pmu)
- {
- unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Enable all counters */
- armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
- }
- static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
- {
- unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Disable all counters */
- armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
- }
- static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
- {
- int idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
- /* Always place a cycle counter into the cycle counter. */
- if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
- if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
- return -EAGAIN;
- return ARMV7_IDX_CYCLE_COUNTER;
- }
- /*
- * For anything other than a cycle counter, try to use
- * the event counters.
- */
- for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
- if (!test_and_set_bit(idx, cpuc->used_mask))
- return idx;
- }
- /* The counters are all in use. */
- return -EAGAIN;
- }
- /*
- * Add an event filter to a given event. This will only work for PMUv2 PMUs.
- */
- static int armv7pmu_set_event_filter(struct hw_perf_event *event,
- struct perf_event_attr *attr)
- {
- unsigned long config_base = 0;
- if (attr->exclude_idle)
- return -EPERM;
- if (attr->exclude_user)
- config_base |= ARMV7_EXCLUDE_USER;
- if (attr->exclude_kernel)
- config_base |= ARMV7_EXCLUDE_PL1;
- if (!attr->exclude_hv)
- config_base |= ARMV7_INCLUDE_HYP;
- /*
- * Install the filter into config_base as this is used to
- * construct the event type.
- */
- event->config_base = config_base;
- return 0;
- }
- static void armv7pmu_reset(void *info)
- {
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u32 idx, nb_cnt = cpu_pmu->num_events;
- /* The counter and interrupt enable registers are unknown at reset. */
- for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
- armv7_pmnc_disable_counter(idx);
- armv7_pmnc_disable_intens(idx);
- }
- /* Initialize & Reset PMNC: C and P bits */
- armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
- }
- static int armv7_a8_map_event(struct perf_event *event)
- {
- return armpmu_map_event(event, &armv7_a8_perf_map,
- &armv7_a8_perf_cache_map, 0xFF);
- }
- static int armv7_a9_map_event(struct perf_event *event)
- {
- return armpmu_map_event(event, &armv7_a9_perf_map,
- &armv7_a9_perf_cache_map, 0xFF);
- }
- static int armv7_a5_map_event(struct perf_event *event)
- {
- return armpmu_map_event(event, &armv7_a5_perf_map,
- &armv7_a5_perf_cache_map, 0xFF);
- }
- static int armv7_a15_map_event(struct perf_event *event)
- {
- return armpmu_map_event(event, &armv7_a15_perf_map,
- &armv7_a15_perf_cache_map, 0xFF);
- }
- static int armv7_a7_map_event(struct perf_event *event)
- {
- return armpmu_map_event(event, &armv7_a7_perf_map,
- &armv7_a7_perf_cache_map, 0xFF);
- }
- static int armv7_a12_map_event(struct perf_event *event)
- {
- return armpmu_map_event(event, &armv7_a12_perf_map,
- &armv7_a12_perf_cache_map, 0xFF);
- }
- static int krait_map_event(struct perf_event *event)
- {
- return armpmu_map_event(event, &krait_perf_map,
- &krait_perf_cache_map, 0xFFFFF);
- }
- static int krait_map_event_no_branch(struct perf_event *event)
- {
- return armpmu_map_event(event, &krait_perf_map_no_branch,
- &krait_perf_cache_map, 0xFFFFF);
- }
- static void armv7pmu_init(struct arm_pmu *cpu_pmu)
- {
- cpu_pmu->handle_irq = armv7pmu_handle_irq;
- cpu_pmu->enable = armv7pmu_enable_event;
- cpu_pmu->disable = armv7pmu_disable_event;
- cpu_pmu->read_counter = armv7pmu_read_counter;
- cpu_pmu->write_counter = armv7pmu_write_counter;
- cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
- cpu_pmu->start = armv7pmu_start;
- cpu_pmu->stop = armv7pmu_stop;
- cpu_pmu->reset = armv7pmu_reset;
- cpu_pmu->max_period = (1LLU << 32) - 1;
- }
- static u32 armv7_read_num_pmnc_events(void)
- {
- u32 nb_cnt;
- /* Read the number of CNTx counters supported from PMNC */
- nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
- /* Add the CPU cycles counter and return */
- return nb_cnt + 1;
- }
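- /*
- * Illustrative note (editorial, not part of the original file): on a
- * Cortex-A8 with four event counters, PMNC.N reads back as 4, so this
- * helper returns 5 (the four PMNx counters plus the cycle counter),
- * matching the indexing above where ARMV7_IDX_CYCLE_COUNTER is 0 and
- * the event counters occupy indices 1..4.
- */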
- static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
- {
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A8";
- cpu_pmu->map_event = armv7_a8_map_event;
- cpu_pmu->num_events = armv7_read_num_pmnc_events();
- return 0;
- }
- static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
- {
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A9";
- cpu_pmu->map_event = armv7_a9_map_event;
- cpu_pmu->num_events = armv7_read_num_pmnc_events();
- return 0;
- }
- static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
- {
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A5";
- cpu_pmu->map_event = armv7_a5_map_event;
- cpu_pmu->num_events = armv7_read_num_pmnc_events();
- return 0;
- }
- static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
- {
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A15";
- cpu_pmu->map_event = armv7_a15_map_event;
- cpu_pmu->num_events = armv7_read_num_pmnc_events();
- cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- return 0;
- }
- static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
- {
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A7";
- cpu_pmu->map_event = armv7_a7_map_event;
- cpu_pmu->num_events = armv7_read_num_pmnc_events();
- cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- return 0;
- }
- static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
- {
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A12";
- cpu_pmu->map_event = armv7_a12_map_event;
- cpu_pmu->num_events = armv7_read_num_pmnc_events();
- cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- return 0;
- }
- static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
- {
- armv7_a12_pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A17";
- return 0;
- }
- /*
- * Krait Performance Monitor Region Event Selection Register (PMRESRn)
- *
- * 31 30 24 16 8 0
- * +--------------------------------+
- * PMRESR0 | EN | CC | CC | CC | CC | N = 1, R = 0
- * +--------------------------------+
- * PMRESR1 | EN | CC | CC | CC | CC | N = 1, R = 1
- * +--------------------------------+
- * PMRESR2 | EN | CC | CC | CC | CC | N = 1, R = 2
- * +--------------------------------+
- * VPMRESR0 | EN | CC | CC | CC | CC | N = 2, R = ?
- * +--------------------------------+
- * EN | G=3 | G=2 | G=1 | G=0
- *
- * Event Encoding:
- *
- * hwc->config_base = 0xNRCCG
- *
- * N = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
- * R = region register
- * CC = class of events the group G is choosing from
- * G = group or particular event
- *
- * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
- *
- * A region (R) corresponds to a piece of the CPU (execution unit, instruction
- * unit, etc.) while the event code (CC) corresponds to a particular class of
- * events (interrupts for example). An event code is broken down into
- * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
- * example).
- */
- #define KRAIT_EVENT (1 << 16)
- #define VENUM_EVENT (2 << 16)
- #define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT)
- #define PMRESRn_EN BIT(31)
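- /*
- * Illustrative sketch (editorial, not part of the original file): the
- * 0xNRCCG encoding documented above is unpacked with the same shifts
- * used in krait_evt_setup() below. For the example event 0x12021, the
- * prefix N is 1 (a Krait CPU event, i.e. PMRESRn rather than VPMRESR0),
- * the region R is 2, the class code CC is 0x02 and the group G is 1.
- */
- #if 0 /* example fragment, never built */
- u32 config_base = 0x12021;
- bool venum_event = !!(config_base & VENUM_EVENT); /* false: N == 1 */
- unsigned int region = (config_base >> 12) & 0xf; /* R = 2 */
- unsigned int code = (config_base >> 4) & 0xff; /* CC = 0x02 */
- unsigned int group = (config_base >> 0) & 0xf; /* G = 1 */
- #endif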
- static u32 krait_read_pmresrn(int n)
- {
- u32 val;
- switch (n) {
- case 0:
- asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
- break;
- case 1:
- asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
- break;
- case 2:
- asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
- break;
- default:
- BUG(); /* Should be validated in krait_pmu_get_event_idx() */
- }
- return val;
- }
- static void krait_write_pmresrn(int n, u32 val)
- {
- switch (n) {
- case 0:
- asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
- break;
- case 1:
- asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
- break;
- case 2:
- asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
- break;
- default:
- BUG(); /* Should be validated in krait_pmu_get_event_idx() */
- }
- }
- static u32 krait_read_vpmresr0(void)
- {
- u32 val;
- asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
- return val;
- }
- static void krait_write_vpmresr0(u32 val)
- {
- asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
- }
- static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
- {
- u32 venum_new_val;
- u32 fp_new_val;
- BUG_ON(preemptible());
- /* CPACR Enable CP10 and CP11 access */
- *venum_orig_val = get_copro_access();
- venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
- set_copro_access(venum_new_val);
- /* Enable FPEXC */
- *fp_orig_val = fmrx(FPEXC);
- fp_new_val = *fp_orig_val | FPEXC_EN;
- fmxr(FPEXC, fp_new_val);
- }
- static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
- {
- BUG_ON(preemptible());
- /* Restore FPEXC */
- fmxr(FPEXC, fp_orig_val);
- isb();
- /* Restore CPACR */
- set_copro_access(venum_orig_val);
- }
- static u32 krait_get_pmresrn_event(unsigned int region)
- {
- static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
- KRAIT_PMRESR1_GROUP0,
- KRAIT_PMRESR2_GROUP0 };
- return pmresrn_table[region];
- }
- static void krait_evt_setup(int idx, u32 config_base)
- {
- u32 val;
- u32 mask;
- u32 vval, fval;
- unsigned int region;
- unsigned int group;
- unsigned int code;
- unsigned int group_shift;
- bool venum_event;
- venum_event = !!(config_base & VENUM_EVENT);
- region = (config_base >> 12) & 0xf;
- code = (config_base >> 4) & 0xff;
- group = (config_base >> 0) & 0xf;
- group_shift = group * 8;
- mask = 0xff << group_shift;
- /* Configure evtsel for the region and group */
- if (venum_event)
- val = KRAIT_VPMRESR0_GROUP0;
- else
- val = krait_get_pmresrn_event(region);
- val += group;
- /* Mix in mode-exclusion bits */
- val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
- armv7_pmnc_write_evtsel(idx, val);
- asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
- if (venum_event) {
- krait_pre_vpmresr0(&vval, &fval);
- val = krait_read_vpmresr0();
- val &= ~mask;
- val |= code << group_shift;
- val |= PMRESRn_EN;
- krait_write_vpmresr0(val);
- krait_post_vpmresr0(vval, fval);
- } else {
- val = krait_read_pmresrn(region);
- val &= ~mask;
- val |= code << group_shift;
- val |= PMRESRn_EN;
- krait_write_pmresrn(region, val);
- }
- }
- static u32 krait_clear_pmresrn_group(u32 val, int group)
- {
- u32 mask;
- int group_shift;
- group_shift = group * 8;
- mask = 0xff << group_shift;
- val &= ~mask;
- /* Don't clear enable bit if entire region isn't disabled */
- if (val & ~PMRESRn_EN)
- return val |= PMRESRn_EN;
- return 0;
- }
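Aside: krait_clear_pmresrn_group() clears one group byte but only drops the enable bit once every group in the register is zero. A stand-alone sketch of that behaviour (illustration only; clear_group is a hypothetical user-space copy of the helper, not the kernel function itself):

#include <stdio.h>
#include <stdint.h>

#define PMRESRn_EN (1U << 31)

/* Mirrors the logic of krait_clear_pmresrn_group() for illustration. */
static uint32_t clear_group(uint32_t val, int group)
{
        uint32_t mask = 0xffU << (group * 8);

        val &= ~mask;
        if (val & ~PMRESRn_EN)          /* other groups still programmed? */
                return val | PMRESRn_EN;
        return 0;                       /* last group gone: drop EN as well */
}

int main(void)
{
        /* EN set, group 0 holds code 0x02, group 1 holds code 0x10 */
        uint32_t val = PMRESRn_EN | 0x1002;

        printf("0x%08x\n", clear_group(val, 0));            /* 0x80001000: group 1 remains, EN kept */
        printf("0x%08x\n", clear_group(val & ~0x1000, 0));   /* 0x00000000: register now empty */
        return 0;
}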
- static void krait_clearpmu(u32 config_base)
- {
- u32 val;
- u32 vval, fval;
- unsigned int region;
- unsigned int group;
- bool venum_event;
- venum_event = !!(config_base & VENUM_EVENT);
- region = (config_base >> 12) & 0xf;
- group = (config_base >> 0) & 0xf;
- if (venum_event) {
- krait_pre_vpmresr0(&vval, &fval);
- val = krait_read_vpmresr0();
- val = krait_clear_pmresrn_group(val, group);
- krait_write_vpmresr0(val);
- krait_post_vpmresr0(vval, fval);
- } else {
- val = krait_read_pmresrn(region);
- val = krait_clear_pmresrn_group(val, group);
- krait_write_pmresrn(region, val);
- }
- }
- static void krait_pmu_disable_event(struct perf_event *event)
- {
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- /* Disable counter and interrupt */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
- /*
- * Clear pmresr code (if destined for PMNx counters)
- */
- if (hwc->config_base & KRAIT_EVENT_MASK)
- krait_clearpmu(hwc->config_base);
- /* Disable interrupt for this counter */
- armv7_pmnc_disable_intens(idx);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
- }
- static void krait_pmu_enable_event(struct perf_event *event)
- {
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
- /*
- * Set event (if destined for PMNx counters)
- * We set the event for the cycle counter because we
- * have the ability to perform event filtering.
- */
- if (hwc->config_base & KRAIT_EVENT_MASK)
- krait_evt_setup(idx, hwc->config_base);
- else
- armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /* Enable interrupt for this counter */
- armv7_pmnc_enable_intens(idx);
- /* Enable counter */
- armv7_pmnc_enable_counter(idx);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
- }
- static void krait_pmu_reset(void *info)
- {
- u32 vval, fval;
- armv7pmu_reset(info);
- /* Clear all pmresrs */
- krait_write_pmresrn(0, 0);
- krait_write_pmresrn(1, 0);
- krait_write_pmresrn(2, 0);
- krait_pre_vpmresr0(&vval, &fval);
- krait_write_vpmresr0(0);
- krait_post_vpmresr0(vval, fval);
- }
- static int krait_event_to_bit(struct perf_event *event, unsigned int region,
- unsigned int group)
- {
- int bit;
- struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- if (hwc->config_base & VENUM_EVENT)
- bit = KRAIT_VPMRESR0_GROUP0;
- else
- bit = krait_get_pmresrn_event(region);
- bit -= krait_get_pmresrn_event(0);
- bit += group;
- /*
- * Lower bits are reserved for use by the counters (see
- * armv7pmu_get_event_idx() for more info)
- */
- bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
- return bit;
- }
- /*
- * We check for column exclusion constraints here.
- * Two events can't use the same group within a PMRESR register.
- */
- static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
- {
- int idx;
- int bit = -1;
- unsigned int prefix;
- unsigned int region;
- unsigned int code;
- unsigned int group;
- bool krait_event;
- struct hw_perf_event *hwc = &event->hw;
- region = (hwc->config_base >> 12) & 0xf;
- code = (hwc->config_base >> 4) & 0xff;
- group = (hwc->config_base >> 0) & 0xf;
- krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
- if (krait_event) {
- /* Ignore invalid events */
- if (group > 3 || region > 2)
- return -EINVAL;
- prefix = hwc->config_base & KRAIT_EVENT_MASK;
- if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
- return -EINVAL;
- if (prefix == VENUM_EVENT && (code & 0xe0))
- return -EINVAL;
- bit = krait_event_to_bit(event, region, group);
- if (test_and_set_bit(bit, cpuc->used_mask))
- return -EAGAIN;
- }
- idx = armv7pmu_get_event_idx(cpuc, event);
- if (idx < 0 && bit >= 0)
- clear_bit(bit, cpuc->used_mask);
- return idx;
- }
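Aside: the column-exclusion check above means two events that decode to the same (region, group) pair claim the same used_mask bit, so the second caller is refused with -EAGAIN. A stand-alone sketch of that collision (illustration only; NUM_COUNTER_BITS and the event_to_bit() flattening are hypothetical stand-ins for ARMV7_IDX_COUNTER_LAST() and krait_event_to_bit()):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NUM_COUNTER_BITS 5      /* hypothetical: cycle counter + 4 event counters */

static unsigned long used_mask; /* stand-in for cpuc->used_mask */

/* Hypothetical flattening of (region, group) into a bitmap index above the
 * per-counter bits, mirroring what krait_event_to_bit() computes. */
static int event_to_bit(unsigned int region, unsigned int group)
{
        return NUM_COUNTER_BITS + region * 4 + group;
}

static bool try_claim(unsigned int region, unsigned int group)
{
        int bit = event_to_bit(region, group);

        if (used_mask & (1UL << bit))
                return false;   /* column exclusion: group already in use */
        used_mask |= 1UL << bit;
        return true;
}

int main(void)
{
        printf("%d\n", try_claim(2, 1)); /* 1: first PMRESR2/group-1 event fits */
        printf("%d\n", try_claim(2, 1)); /* 0: second event on the same group is rejected */
        printf("%d\n", try_claim(2, 2)); /* 1: a different group in the same region is fine */
        return 0;
}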
- static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
- {
- int bit;
- struct hw_perf_event *hwc = &event->hw;
- unsigned int region;
- unsigned int group;
- bool krait_event;
- region = (hwc->config_base >> 12) & 0xf;
- group = (hwc->config_base >> 0) & 0xf;
- krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
- if (krait_event) {
- bit = krait_event_to_bit(event, region, group);
- clear_bit(bit, cpuc->used_mask);
- }
- }
- static int krait_pmu_init(struct arm_pmu *cpu_pmu)
- {
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Krait";
- /* Some early versions of Krait don't support PC write events */
- if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
- "qcom,no-pc-write"))
- cpu_pmu->map_event = krait_map_event_no_branch;
- else
- cpu_pmu->map_event = krait_map_event;
- cpu_pmu->num_events = armv7_read_num_pmnc_events();
- cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- cpu_pmu->reset = krait_pmu_reset;
- cpu_pmu->enable = krait_pmu_enable_event;
- cpu_pmu->disable = krait_pmu_disable_event;
- cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
- cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
- return 0;
- }
- #else
- static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
- {
- return -ENODEV;
- }
- static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
- {
- return -ENODEV;
- }
- static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
- {
- return -ENODEV;
- }
- static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
- {
- return -ENODEV;
- }
- static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
- {
- return -ENODEV;
- }
- static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
- {
- return -ENODEV;
- }
- static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
- {
- return -ENODEV;
- }
- static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
- {
- return -ENODEV;
- }
- #endif /* CONFIG_CPU_V7 */