core-book3s.c

/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>

#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002
#define BHRB_PREDICTION		0x0000000000000001
#define BHRB_EA			0xFFFFFFFFFFFFFFFCUL
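
/*
 * Layout of a raw BHRB entry, as implied by the masks above: bits 63..2
 * hold a 4-byte-aligned effective address (BHRB_EA), bit 1 marks a target
 * entry (BHRB_TARGET) and bit 0 is the prediction flag (BHRB_PREDICTION).
 * For example, the raw value 0x0000000010002f51 decodes to address
 * 0x0000000010002f50 with the prediction bit set and the target bit clear.
 * See power_pmu_bhrb_read() below for how these flags are consumed.
 */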

struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	/*
	 * The order of the MMCR array is:
	 *  - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2
	 *  - 32-bit, MMCR0, MMCR1, MMCR2
	 */
	unsigned long mmcr[4];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int txn_flags;
	int n_txn_start;

	/* BHRB bits */
	u64 bhrb_filter;	/* BHRB HW branch filter */
	unsigned int bhrb_users;
	void *bhrb_context;
	struct perf_branch_stack bhrb_stack;
	struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
	u64 ic_init;
};

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE
#define MMCR0_FC56		0
#define MMCR0_PMAO		0
#define MMCR0_EBE		0
#define MMCR0_BHRBA		0
#define MMCR0_PMCC		0
#define MMCR0_PMCC_U6		0

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

static bool is_ebb_event(struct perf_event *event) { return false; }
static int ebb_event_check(struct perf_event *event) { return 0; }
static void ebb_event_add(struct perf_event *event) { }
static void ebb_switch_out(unsigned long mmcr0) { }
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
	return cpuhw->mmcr[0];
}
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
static bool use_ic(u64 event)
{
	return false;
}
#endif /* CONFIG_PPC32 */

static bool regs_use_siar(struct pt_regs *regs)
{
	/*
	 * When we take a performance monitor exception the regs are set up
	 * using perf_read_regs(), which overloads some fields, in particular
	 * regs->result, to tell us whether to use SIAR.
	 *
	 * However, if the regs are from another exception, e.g. a syscall,
	 * then they have not been set up using perf_read_regs() and so
	 * regs->result is something random.
	 */
	return ((TRAP(regs) == 0xf00) && regs->result);
}

/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}
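
/*
 * Worked example of the adjustment above: with instruction sampling
 * enabled and MMCRA[SLOT] = 3, the sampled instruction sits two
 * instructions (4 * (3 - 1) = 8 bytes) past the address in SIAR, so 8
 * is returned and added to SIAR by the caller.
 */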

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address). If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
 * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	bool sdar_valid;

	if (ppmu->flags & PPMU_HAS_SIER)
		sdar_valid = regs->dar & SIER_SDAR_VALID;
	else {
		unsigned long sdsync;

		if (ppmu->flags & PPMU_SIAR_VALID)
			sdsync = POWER7P_MMCRA_SDAR_VALID;
		else if (ppmu->flags & PPMU_ALT_SIPR)
			sdsync = POWER6_MMCRA_SDSYNC;
		else if (ppmu->flags & PPMU_NO_SIAR)
			sdsync = MMCRA_SAMPLE_ENABLE;
		else
			sdsync = MMCRA_SDSYNC;

		sdar_valid = mmcra & sdsync;
	}

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
		*addrp = mfspr(SPRN_SDAR);
}

static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIHV);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}

static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIPR);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}

static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results.
	 */
	if (ppmu->flags & PPMU_NO_SIPR) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (is_kernel_addr(siar))
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs))
		return PERF_RECORD_MISC_USER;

	if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	return PERF_RECORD_MISC_KERNEL;
}

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->dar to store SIER if we have it.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;

	if (ppmu->flags & PPMU_HAS_SIER)
		regs->dar = mfspr(SPRN_SIER);

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off, hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if ((ppmu->flags & PPMU_NO_SIAR))
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result = use_siar;
}

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}

/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if (marked) {
		if (ppmu->flags & PPMU_HAS_SIER)
			return regs->dar & SIER_SIAR_VALID;

		if (ppmu->flags & PPMU_SIAR_VALID)
			return mmcra & POWER7P_MMCRA_SIAR_VALID;
	}

	return 1;
}

/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
	asm volatile(PPC_CLRBHRB);
}

static void power_pmu_bhrb_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	/* Clear BHRB if we changed task context to avoid data leaks */
	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
		power_pmu_bhrb_reset();
		cpuhw->bhrb_context = event->ctx;
	}
	cpuhw->bhrb_users++;
	perf_sched_cb_inc(event->ctx->pmu);
}

static void power_pmu_bhrb_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	WARN_ON_ONCE(!cpuhw->bhrb_users);
	cpuhw->bhrb_users--;
	perf_sched_cb_dec(event->ctx->pmu);

	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
		/* BHRB cannot be turned off when other
		 * events are active on the PMU.
		 */

		/* avoid stale pointer */
		cpuhw->bhrb_context = NULL;
	}
}

/* Called from ctxsw to prevent one process's branch entries from
 * mingling with the other process's entries during context switch.
 */
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	if (!ppmu->bhrb_nr)
		return;

	if (sched_in)
		power_pmu_bhrb_reset();
}

/* Calculate the to address for a branch */
static __u64 power_pmu_bhrb_to(u64 addr)
{
	unsigned int instr;
	int ret;
	__u64 target;

	if (is_kernel_addr(addr))
		return branch_target((unsigned int *)addr);

	/* Userspace: need to copy the instruction here, then translate it */
	pagefault_disable();
	ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
	if (ret) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	target = branch_target(&instr);
	if ((!target) || (instr & BRANCH_ABSOLUTE))
		return target;

	/* Translate relative branch target from kernel to user address */
	return target - (unsigned long)&instr + addr;
}
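
/*
 * The rebasing arithmetic above works because branch_target() computes
 * the target relative to the address of the copied instruction in kernel
 * memory (&instr). Subtracting (unsigned long)&instr recovers the raw
 * displacement encoded in the branch, and adding the original user
 * address 'addr' yields the target in the user's address space.
 */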

/* Processing BHRB entries */
static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
{
	u64 val;
	u64 addr;
	int r_index, u_index, pred;

	r_index = 0;
	u_index = 0;
	while (r_index < ppmu->bhrb_nr) {
		/* Assembly read function */
		val = read_bhrb(r_index++);
		if (!val)
			/* Terminal marker: end of valid BHRB entries */
			break;
		else {
			addr = val & BHRB_EA;
			pred = val & BHRB_PREDICTION;

			if (!addr)
				/* invalid entry */
				continue;

			/* Branches are read most recent first (ie. mfbhrb 0 is
			 * the most recent branch).
			 * There are two types of valid entries:
			 * 1) a target entry which is the to address of a
			 *    computed goto like a blr,bctr,btar. The next
			 *    entry read from the bhrb will be the branch
			 *    corresponding to this target (ie. the actual
			 *    blr/bctr/btar instruction).
			 * 2) a from address which is an actual branch. If a
			 *    target entry precedes this, then this is the
			 *    matching branch for that target. If this is not
			 *    following a target entry, then this is a branch
			 *    where the target is given as an immediate field
			 *    in the instruction (ie. an i or b form branch).
			 *    In this case we need to read the instruction from
			 *    memory to determine the target/to address.
			 */
			if (val & BHRB_TARGET) {
				/* Target branches use two entries
				 * (ie. computed gotos/XL form)
				 */
				cpuhw->bhrb_entries[u_index].to = addr;
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;

				/* Get from address in next entry */
				val = read_bhrb(r_index++);
				addr = val & BHRB_EA;
				if (val & BHRB_TARGET) {
					/* Shouldn't have two targets in a
					 * row. Reset index and try again.
					 */
					r_index--;
					addr = 0;
				}
				cpuhw->bhrb_entries[u_index].from = addr;
			} else {
				/* Branches to immediate field
				 * (ie I or B form)
				 */
				cpuhw->bhrb_entries[u_index].from = addr;
				cpuhw->bhrb_entries[u_index].to =
					power_pmu_bhrb_to(addr);
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;
			}
			u_index++;
		}
	}
	cpuhw->bhrb_stack.nr = u_index;
	return;
}

static bool is_ebb_event(struct perf_event *event)
{
	/*
	 * This could be a per-PMU callback, but we'd rather avoid the cost. We
	 * check that the PMU supports EBB, meaning those that don't can still
	 * use bit 63 of the event code for something else if they wish.
	 */
	return (ppmu->flags & PPMU_ARCH_207S) &&
	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}

static int ebb_event_check(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;

	/* Event and group leader must agree on EBB */
	if (is_ebb_event(leader) != is_ebb_event(event))
		return -EINVAL;

	if (is_ebb_event(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			return -EINVAL;

		if (!leader->attr.pinned || !leader->attr.exclusive)
			return -EINVAL;

		if (event->attr.freq ||
		    event->attr.inherit ||
		    event->attr.sample_type ||
		    event->attr.sample_period ||
		    event->attr.enable_on_exec)
			return -EINVAL;
	}

	return 0;
}

static void ebb_event_add(struct perf_event *event)
{
	if (!is_ebb_event(event) || current->thread.used_ebb)
		return;

	/*
	 * IFF this is the first time we've added an EBB event, set
	 * PMXE in the user MMCR0 so we can detect when it's cleared by
	 * userspace. We need this so that we can context switch while
	 * userspace is in the EBB handler (where PMXE is 0).
	 */
	current->thread.used_ebb = 1;
	current->thread.mmcr0 |= MMCR0_PMXE;
}

static void ebb_switch_out(unsigned long mmcr0)
{
	if (!(mmcr0 & MMCR0_EBE))
		return;

	current->thread.siar  = mfspr(SPRN_SIAR);
	current->thread.sier  = mfspr(SPRN_SIER);
	current->thread.sdar  = mfspr(SPRN_SDAR);
	current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
}

static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
	unsigned long mmcr0 = cpuhw->mmcr[0];

	if (!ebb)
		goto out;

	/* Enable EBB and read/write to all 6 PMCs and BHRB for userspace */
	mmcr0 |= MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC_U6;

	/*
	 * Add any bits from the user MMCR0, FC or PMAO. This is compatible
	 * with pmao_restore_workaround() because we may add PMAO but we never
	 * clear it here.
	 */
	mmcr0 |= current->thread.mmcr0;

	/*
	 * Be careful not to set PMXE if userspace had it cleared. This is also
	 * compatible with pmao_restore_workaround() because it has already
	 * cleared PMXE and we leave PMAO alone.
	 */
	if (!(current->thread.mmcr0 & MMCR0_PMXE))
		mmcr0 &= ~MMCR0_PMXE;

	mtspr(SPRN_SIAR, current->thread.siar);
	mtspr(SPRN_SIER, current->thread.sier);
	mtspr(SPRN_SDAR, current->thread.sdar);

	/*
	 * Merge the kernel & user values of MMCR2. The semantics we implement
	 * are that the user MMCR2 can set bits, ie. cause counters to freeze,
	 * but not clear bits. If a task wants to be able to clear bits, ie.
	 * unfreeze counters, it should not set exclude_xxx in its events and
	 * instead manage the MMCR2 entirely by itself.
	 */
	mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2);
out:
	return mmcr0;
}

static void pmao_restore_workaround(bool ebb)
{
	unsigned pmcs[6];

	if (!cpu_has_feature(CPU_FTR_PMAO_BUG))
		return;

	/*
	 * On POWER8E there is a hardware defect which affects the PMU context
	 * switch logic, ie. power_pmu_disable/enable().
	 *
	 * When a counter overflows PMXE is cleared and FC/PMAO is set in MMCR0
	 * by the hardware. Sometime later the actual PMU exception is
	 * delivered.
	 *
	 * If we context switch, or simply disable/enable, the PMU prior to the
	 * exception arriving, the exception will be lost when we clear PMAO.
	 *
	 * When we reenable the PMU, we will write the saved MMCR0 with PMAO
	 * set, and this _should_ generate an exception. However because of the
	 * defect no exception is generated when we write PMAO, and we get
	 * stuck with no counters counting but no exception delivered.
	 *
	 * The workaround is to detect this case and tweak the hardware to
	 * create another pending PMU exception.
	 *
	 * We do that by setting up PMC6 (cycles) for an imminent overflow and
	 * enabling the PMU. That causes a new exception to be generated in the
	 * chip, but we don't take it yet because we have interrupts hard
	 * disabled. We then write back the PMU state as we want it to be seen
	 * by the exception handler. When we reenable interrupts the exception
	 * handler will be called and see the correct state.
	 *
	 * The logic is the same for EBB, except that the exception is gated by
	 * us having interrupts hard disabled as well as the fact that we are
	 * not in userspace. The exception is finally delivered when we return
	 * to userspace.
	 */

	/* Only if PMAO is set and PMAO_SYNC is clear */
	if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO)
		return;

	/* If we're doing EBB, only if BESCR[GE] is set */
	if (ebb && !(current->thread.bescr & BESCR_GE))
		return;

	/*
	 * We are already soft-disabled in power_pmu_enable(). We need to hard
	 * disable to actually prevent the PMU exception from firing.
	 */
	hard_irq_disable();

	/*
	 * This is a bit gross, but we know we're on POWER8E and have 6 PMCs.
	 * Using read/write_pmc() in a for loop adds 12 function calls and
	 * almost doubles our code size.
	 */
	pmcs[0] = mfspr(SPRN_PMC1);
	pmcs[1] = mfspr(SPRN_PMC2);
	pmcs[2] = mfspr(SPRN_PMC3);
	pmcs[3] = mfspr(SPRN_PMC4);
	pmcs[4] = mfspr(SPRN_PMC5);
	pmcs[5] = mfspr(SPRN_PMC6);

	/* Ensure all freeze bits are unset */
	mtspr(SPRN_MMCR2, 0);

	/* Set up PMC6 to overflow in one cycle */
	mtspr(SPRN_PMC6, 0x7FFFFFFE);

	/* Enable exceptions and unfreeze PMC6 */
	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_PMCjCE | MMCR0_PMAO);

	/* Now we need to refreeze and restore the PMCs */
	mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMAO);

	mtspr(SPRN_PMC1, pmcs[0]);
	mtspr(SPRN_PMC2, pmcs[1]);
	mtspr(SPRN_PMC3, pmcs[2]);
	mtspr(SPRN_PMC4, pmcs[3]);
	mtspr(SPRN_PMC5, pmcs[4]);
	mtspr(SPRN_PMC6, pmcs[5]);
}

static bool use_ic(u64 event)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1) &&
			(event == 0x200f2 || event == 0x300f2))
		return true;

	return false;
}
#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
	unsigned long sdar, sier, flags;
	u32 pmcs[MAX_HWEVENTS];
	int i;

	if (!ppmu->n_counter)
		return;

	local_irq_save(flags);

	pr_info("CPU: %d PMU registers, ppmu = %s n_counters = %d",
		 smp_processor_id(), ppmu->name, ppmu->n_counter);

	for (i = 0; i < ppmu->n_counter; i++)
		pmcs[i] = read_pmc(i + 1);

	for (; i < MAX_HWEVENTS; i++)
		pmcs[i] = 0xdeadbeef;

	pr_info("PMC1: %08x PMC2: %08x PMC3: %08x PMC4: %08x\n",
		 pmcs[0], pmcs[1], pmcs[2], pmcs[3]);

	if (ppmu->n_counter > 4)
		pr_info("PMC5: %08x PMC6: %08x PMC7: %08x PMC8: %08x\n",
			 pmcs[4], pmcs[5], pmcs[6], pmcs[7]);

	pr_info("MMCR0: %016lx MMCR1: %016lx MMCRA: %016lx\n",
		mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCRA));

	sdar = sier = 0;
#ifdef CONFIG_PPC64
	sdar = mfspr(SPRN_SDAR);

	if (ppmu->flags & PPMU_HAS_SIER)
		sier = mfspr(SPRN_SIER);

	if (ppmu->flags & PPMU_ARCH_207S) {
		pr_info("MMCR2: %016lx EBBHR: %016lx\n",
			mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
		pr_info("EBBRR: %016lx BESCR: %016lx\n",
			mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
	}
#endif
	pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n",
		mfspr(SPRN_SIAR), sdar, sier);

	local_irq_restore(flags);
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
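
/*
 * A note on the constraint encoding used above (a summary inferred from
 * the checks in power_check_constraints(), not spelled out in this file):
 * each event supplies a (mask, value) pair of bitfields. Fields that must
 * match exactly across events, such as a shared unit select, are caught
 * by the XOR-and-mask test. Fields covered by ppmu->add_fields instead
 * accumulate a count of how many events use a shared resource, and
 * ppmu->test_adder is chosen so that adding one event more than the
 * resource supports carries into a bit position where the same test
 * detects the conflict.
 */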

/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	/*
	 * If the PMU we're on supports per event exclude settings then we
	 * don't need to do any of this logic. NB. This assumes no PMU has both
	 * per event exclude and limited PMCs.
	 */
	if (ppmu->flags & PPMU_ARCH_207S)
		return 0;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values; if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values, unless we rolled a counter over. If a counter is
	 * rolled back, it will be smaller, but within 256, which is the
	 * maximum number of events to roll back at once. If we detect a
	 * rollback, return 0. This can lead to a small lack of precision in
	 * the counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}
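
/*
 * Worked examples: prev = 0x1000, val = 0x0f80 reads as a rollback
 * (prev > val and the difference, 0x80, is below 256), so the delta is
 * forced to 0. A genuine 32-bit wrap such as prev = 0xfffffff0,
 * val = 0x10 gives (0x10 - 0xfffffff0) & 0xffffffff = 0x20, which is
 * reported normally since prev - val is far larger than 256.
 */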

static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;

	if (is_ebb_event(event)) {
		val = read_pmc(event->hw.idx);
		if (use_ic(event->attr.config)) {
			val = mfspr(SPRN_IC);
			if (val > cpuhw->ic_init)
				val = val - cpuhw->ic_init;
			else
				val = val + (0 - cpuhw->ic_init);
		}
		local64_set(&event->hw.prev_count, val);
		return;
	}

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		if (use_ic(event->attr.config)) {
			val = mfspr(SPRN_IC);
			if (val > cpuhw->ic_init)
				val = val - cpuhw->ic_init;
			else
				val = val + (0 - cpuhw->ic_init);
		}
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);

	/*
	 * A number of places program the PMC with (0x80000000 - period_left).
	 * We never want period_left to be less than 1 because we will program
	 * the PMC with a value >= 0x80000000 and an edge detected PMC will
	 * roll around to 0 before taking an exception. We have seen this
	 * on POWER8.
	 *
	 * To fix this, clamp the minimum value of period_left to 1.
	 */
	do {
		prev = local64_read(&event->hw.period_left);
		val = prev - delta;
		if (val < 1)
			val = 1;
	} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
}
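
/*
 * For example, if period_left were allowed to reach 0, the reload value
 * would be 0x80000000 - 0 = 0x80000000, which already has the overflow
 * bit set; clamping period_left to 1 keeps the programmed value at or
 * below 0x7fffffff, so the counter must tick before it overflows.
 */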

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts. This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}

/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events. We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
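
/*
 * Packing the mtspr and the two mfsprs into a single asm statement above
 * guarantees the compiler cannot schedule any instructions between the
 * MMCR0 write and the PMC5/PMC6 reads, keeping the freeze-to-read delay
 * as short and as constant as the comment before write_mmcr0() requires.
 */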
  1028. /*
  1029. * Disable all events to prevent PMU interrupts and to allow
  1030. * events to be added or removed.
  1031. */
  1032. static void power_pmu_disable(struct pmu *pmu)
  1033. {
  1034. struct cpu_hw_events *cpuhw;
  1035. unsigned long flags, mmcr0, val;
  1036. if (!ppmu)
  1037. return;
  1038. local_irq_save(flags);
  1039. cpuhw = this_cpu_ptr(&cpu_hw_events);
  1040. if (!cpuhw->disabled) {
  1041. /*
  1042. * Check if we ever enabled the PMU on this cpu.
  1043. */
  1044. if (!cpuhw->pmcs_enabled) {
  1045. ppc_enable_pmcs();
  1046. cpuhw->pmcs_enabled = 1;
  1047. }
  1048. /*
  1049. * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56
  1050. */
  1051. val = mmcr0 = mfspr(SPRN_MMCR0);
  1052. val |= MMCR0_FC;
  1053. val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
  1054. MMCR0_FC56);
  1055. /*
  1056. * The barrier is to make sure the mtspr has been
  1057. * executed and the PMU has frozen the events etc.
  1058. * before we return.
  1059. */
  1060. write_mmcr0(cpuhw, val);
  1061. mb();
  1062. /*
  1063. * Disable instruction sampling if it was enabled
  1064. */
  1065. if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
  1066. mtspr(SPRN_MMCRA,
  1067. cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
  1068. mb();
  1069. }
  1070. cpuhw->disabled = 1;
  1071. cpuhw->n_added = 0;
  1072. ebb_switch_out(mmcr0);
  1073. }
  1074. local_irq_restore(flags);
  1075. }
  1076. /*
  1077. * Re-enable all events if disable == 0.
  1078. * If we were previously disabled and events were added, then
  1079. * put the new config on the PMU.
  1080. */
  1081. static void power_pmu_enable(struct pmu *pmu)
  1082. {
  1083. struct perf_event *event;
  1084. struct cpu_hw_events *cpuhw;
  1085. unsigned long flags;
  1086. long i;
  1087. unsigned long val, mmcr0;
  1088. s64 left;
  1089. unsigned int hwc_index[MAX_HWEVENTS];
  1090. int n_lim;
  1091. int idx;
  1092. bool ebb;
  1093. if (!ppmu)
  1094. return;
  1095. local_irq_save(flags);
  1096. cpuhw = this_cpu_ptr(&cpu_hw_events);
  1097. if (!cpuhw->disabled)
  1098. goto out;
  1099. if (cpuhw->n_events == 0) {
  1100. ppc_set_pmu_inuse(0);
  1101. goto out;
  1102. }
  1103. cpuhw->disabled = 0;
  1104. /*
  1105. * EBB requires an exclusive group and all events must have the EBB
  1106. * flag set, or not set, so we can just check a single event. Also we
  1107. * know we have at least one event.
  1108. */
  1109. ebb = is_ebb_event(cpuhw->event[0]);
  1110. /*
  1111. * If we didn't change anything, or only removed events,
  1112. * no need to recalculate MMCR* settings and reset the PMCs.
  1113. * Just reenable the PMU with the current MMCR* settings
  1114. * (possibly updated for removal of events).
  1115. */
  1116. if (!cpuhw->n_added) {
  1117. mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
  1118. mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
  1119. goto out_enable;
  1120. }
  1121. /*
  1122. * Clear all MMCR settings and recompute them for the new set of events.
  1123. */
  1124. memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
  1125. if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
  1126. cpuhw->mmcr, cpuhw->event)) {
  1127. /* shouldn't ever get here */
  1128. printk(KERN_ERR "oops compute_mmcr failed\n");
  1129. goto out;
  1130. }
  1131. if (!(ppmu->flags & PPMU_ARCH_207S)) {
  1132. /*
  1133. * Add in MMCR0 freeze bits corresponding to the attr.exclude_*
  1134. * bits for the first event. We have already checked that all
  1135. * events have the same value for these bits as the first event.
  1136. */
  1137. event = cpuhw->event[0];
  1138. if (event->attr.exclude_user)
  1139. cpuhw->mmcr[0] |= MMCR0_FCP;
  1140. if (event->attr.exclude_kernel)
  1141. cpuhw->mmcr[0] |= freeze_events_kernel;
  1142. if (event->attr.exclude_hv)
  1143. cpuhw->mmcr[0] |= MMCR0_FCHV;
  1144. }
  1145. /*
  1146. * Write the new configuration to MMCR* with the freeze
  1147. * bit set and set the hardware events to their initial values.
  1148. * Then unfreeze the events.
  1149. */
  1150. ppc_set_pmu_inuse(1);
  1151. mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
  1152. mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
  1153. mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
  1154. | MMCR0_FC);
  1155. if (ppmu->flags & PPMU_ARCH_207S)
  1156. mtspr(SPRN_MMCR2, cpuhw->mmcr[3]);
  1157. /*
  1158. * Read off any pre-existing events that need to move
  1159. * to another PMC.
  1160. */
  1161. for (i = 0; i < cpuhw->n_events; ++i) {
  1162. event = cpuhw->event[i];
  1163. if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
  1164. power_pmu_read(event);
  1165. write_pmc(event->hw.idx, 0);
  1166. event->hw.idx = 0;
  1167. }
  1168. }
  1169. /*
  1170. * Initialize the PMCs for all the new and moved events.
  1171. */
  1172. cpuhw->n_limited = n_lim = 0;
  1173. for (i = 0; i < cpuhw->n_events; ++i) {
  1174. event = cpuhw->event[i];
  1175. if (event->hw.idx)
  1176. continue;
  1177. idx = hwc_index[i] + 1;
  1178. if (is_limited_pmc(idx)) {
  1179. cpuhw->limited_counter[n_lim] = event;
  1180. cpuhw->limited_hwidx[n_lim] = idx;
  1181. ++n_lim;
  1182. continue;
  1183. }
  1184. if (ebb)
  1185. val = local64_read(&event->hw.prev_count);
  1186. else {
  1187. val = 0;
  1188. if (event->hw.sample_period) {
  1189. left = local64_read(&event->hw.period_left);
  1190. if (left < 0x80000000L)
  1191. val = 0x80000000L - left;
  1192. }
  1193. local64_set(&event->hw.prev_count, val);
  1194. }
  1195. event->hw.idx = idx;
  1196. if (event->hw.state & PERF_HES_STOPPED)
  1197. val = 0;
  1198. write_pmc(idx, val);
  1199. perf_event_update_userpage(event);
  1200. }
  1201. cpuhw->n_limited = n_lim;
  1202. cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
  1203. out_enable:
  1204. pmao_restore_workaround(ebb);
  1205. mmcr0 = ebb_switch_in(ebb, cpuhw);
  1206. mb();
  1207. if (cpuhw->bhrb_users)
  1208. ppmu->config_bhrb(cpuhw->bhrb_filter);
  1209. write_mmcr0(cpuhw, mmcr0);
  1210. /*
  1211. * Enable instruction sampling if necessary
  1212. */
  1213. if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
  1214. mb();
  1215. mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
  1216. }
  1217. out:
  1218. local_irq_restore(flags);
  1219. }
  1220. static int collect_events(struct perf_event *group, int max_count,
  1221. struct perf_event *ctrs[], u64 *events,
  1222. unsigned int *flags)
  1223. {
  1224. int n = 0;
  1225. struct perf_event *event;
  1226. if (!is_software_event(group)) {
  1227. if (n >= max_count)
  1228. return -1;
  1229. ctrs[n] = group;
  1230. flags[n] = group->hw.event_base;
  1231. events[n++] = group->hw.config;
  1232. }
  1233. list_for_each_entry(event, &group->sibling_list, group_entry) {
  1234. if (!is_software_event(event) &&
  1235. event->state != PERF_EVENT_STATE_OFF) {
  1236. if (n >= max_count)
  1237. return -1;
  1238. ctrs[n] = event;
  1239. flags[n] = event->hw.event_base;
  1240. events[n++] = event->hw.config;
  1241. }
  1242. }
  1243. return n;
  1244. }
  1245. /*
  1246. * Add a event to the PMU.
  1247. * If all events are not already frozen, then we disable and
  1248. * re-enable the PMU in order to get hw_perf_enable to do the
  1249. * actual work of reconfiguring the PMU.
  1250. */
  1251. static int power_pmu_add(struct perf_event *event, int ef_flags)
  1252. {
  1253. struct cpu_hw_events *cpuhw;
  1254. unsigned long flags;
  1255. int n0;
  1256. int ret = -EAGAIN;
  1257. local_irq_save(flags);
  1258. perf_pmu_disable(event->pmu);
  1259. /*
  1260. * Add the event to the list (if there is room)
  1261. * and check whether the total set is still feasible.
  1262. */
  1263. cpuhw = this_cpu_ptr(&cpu_hw_events);
  1264. n0 = cpuhw->n_events;
  1265. if (n0 >= ppmu->n_counter)
  1266. goto out;
  1267. cpuhw->event[n0] = event;
  1268. cpuhw->events[n0] = event->hw.config;
  1269. cpuhw->flags[n0] = event->hw.event_base;
  1270. /*
  1271. * This event may have been disabled/stopped in record_and_restart()
  1272. * because we exceeded the ->event_limit. If re-starting the event,
  1273. * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
  1274. * notification is re-enabled.
  1275. */
  1276. if (!(ef_flags & PERF_EF_START))
  1277. event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
  1278. else
  1279. event->hw.state = 0;
  1280. /*
  1281. * If group events scheduling transaction was started,
  1282. * skip the schedulability test here, it will be performed
  1283. * at commit time(->commit_txn) as a whole
  1284. */
  1285. if (cpuhw->txn_flags & PERF_PMU_TXN_ADD)
  1286. goto nocheck;
  1287. if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
  1288. goto out;
  1289. if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
  1290. goto out;
  1291. event->hw.config = cpuhw->events[n0];
  1292. nocheck:
  1293. ebb_event_add(event);
  1294. ++cpuhw->n_events;
  1295. ++cpuhw->n_added;
  1296. ret = 0;
  1297. out:
  1298. if (has_branch_stack(event)) {
  1299. power_pmu_bhrb_enable(event);
  1300. cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
  1301. event->attr.branch_sample_type);
  1302. }
  1303. /*
  1304. * Workaround for POWER9 DD1 to use the Instruction Counter
  1305. * register value for instruction counting
  1306. */
  1307. if (use_ic(event->attr.config))
  1308. cpuhw->ic_init = mfspr(SPRN_IC);
  1309. perf_pmu_enable(event->pmu);
  1310. local_irq_restore(flags);
  1311. return ret;
  1312. }
  1313. /*
  1314. * Remove a event from the PMU.
  1315. */
  1316. static void power_pmu_del(struct perf_event *event, int ef_flags)
  1317. {
  1318. struct cpu_hw_events *cpuhw;
  1319. long i;
  1320. unsigned long flags;
  1321. local_irq_save(flags);
  1322. perf_pmu_disable(event->pmu);
  1323. power_pmu_read(event);
  1324. cpuhw = this_cpu_ptr(&cpu_hw_events);
  1325. for (i = 0; i < cpuhw->n_events; ++i) {
  1326. if (event == cpuhw->event[i]) {
  1327. while (++i < cpuhw->n_events) {
  1328. cpuhw->event[i-1] = cpuhw->event[i];
  1329. cpuhw->events[i-1] = cpuhw->events[i];
  1330. cpuhw->flags[i-1] = cpuhw->flags[i];
  1331. }
  1332. --cpuhw->n_events;
  1333. ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
  1334. if (event->hw.idx) {
  1335. write_pmc(event->hw.idx, 0);
  1336. event->hw.idx = 0;
  1337. }
  1338. perf_event_update_userpage(event);
  1339. break;
  1340. }
  1341. }
  1342. for (i = 0; i < cpuhw->n_limited; ++i)
  1343. if (event == cpuhw->limited_counter[i])
  1344. break;
  1345. if (i < cpuhw->n_limited) {
  1346. while (++i < cpuhw->n_limited) {
  1347. cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
  1348. cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
  1349. }
  1350. --cpuhw->n_limited;
  1351. }
  1352. if (cpuhw->n_events == 0) {
  1353. /* disable exceptions if no events are running */
  1354. cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
  1355. }
  1356. if (has_branch_stack(event))
  1357. power_pmu_bhrb_disable(event);
  1358. perf_pmu_enable(event->pmu);
  1359. local_irq_restore(flags);
  1360. }
  1361. /*
  1362. * POWER-PMU does not support disabling individual counters, hence
  1363. * program their cycle counter to their max value and ignore the interrupts.
  1364. */
  1365. static void power_pmu_start(struct perf_event *event, int ef_flags)
  1366. {
  1367. unsigned long flags;
  1368. s64 left;
  1369. unsigned long val;
  1370. if (!event->hw.idx || !event->hw.sample_period)
  1371. return;
  1372. if (!(event->hw.state & PERF_HES_STOPPED))
  1373. return;
  1374. if (ef_flags & PERF_EF_RELOAD)
  1375. WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
  1376. local_irq_save(flags);
  1377. perf_pmu_disable(event->pmu);
  1378. event->hw.state = 0;
  1379. left = local64_read(&event->hw.period_left);
  1380. val = 0;
  1381. if (left < 0x80000000L)
  1382. val = 0x80000000L - left;
  1383. write_pmc(event->hw.idx, val);
  1384. perf_event_update_userpage(event);
  1385. perf_pmu_enable(event->pmu);
  1386. local_irq_restore(flags);
  1387. }
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void power_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(cpuhw->txn_flags);		/* txn already in flight */

	cpuhw->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	cpuhw->n_txn_start = cpuhw->n_events;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	unsigned int txn_flags;

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	txn_flags = cpuhw->txn_flags;
	cpuhw->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	if (!ppmu)
		return -EAGAIN;

	cpuhw = this_cpu_ptr(&cpu_hw_events);
	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuhw->txn_flags = 0;
		return 0;
	}

	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}

/*
 * Return 1 if we might be able to put the event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
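
/*
 * Set up a new event: map the generic/cache/raw config onto a raw event
 * code, check that the event can be scheduled alongside its group, and
 * reserve the PMC hardware if this is the first active event.
 */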
static int power_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	int n;
	int err;
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return -ENOENT;

	if (has_branch_stack(event)) {
		/* PMU has BHRB enabled */
		if (!(ppmu->flags & PPMU_ARCH_207S))
			return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return -ENOENT;
	}

	event->hw.config_base = ev;
	event->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (event->attach_state & PERF_ATTACH_TASK)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return -EINVAL;
		}
	}

	/* Extra checks for EBB */
	err = ebb_event_check(event);
	if (err)
		return err;

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group. We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return -EINVAL;

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);

	if (has_branch_stack(event)) {
		cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
					event->attr.branch_sample_type);

		if (cpuhw->bhrb_filter == -1) {
			put_cpu_var(cpu_hw_events);
			return -EOPNOTSUPP;
		}
	}

	put_cpu_var(cpu_hw_events);
	if (err)
		return -EINVAL;

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * For EBB events we just context switch the PMC value, we don't do any
	 * of the sample_period logic. We use hw.prev_count for this.
	 */
	if (is_ebb_event(event))
		local64_set(&event->hw.prev_count, 0);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}
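
/*
 * Counter index exposed to userspace (the perf core uses this to fill
 * the index field of the mmap'd event page).
 */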
static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}
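
/*
 * Show an "events" sysfs attribute: print the raw event code for the
 * named event.
 */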
ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

static struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
	.sched_task	= power_pmu_sched_task,
};

/*
 * A counter has overflowed; update its count and record
 * things if requested. Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (delta == 0)
		left++;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = siar_valid(regs);
			event->hw.last_period = event->hw.sample_period;
		}
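		/*
		 * Program the PMC to 0x80000000 - left so that it overflows
		 * (bit 31 set) and raises the next exception after 'left'
		 * more events.
		 */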
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
			struct cpu_hw_events *cpuhw;
			cpuhw = this_cpu_ptr(&cpu_hw_events);
			power_pmu_bhrb_read(cpuhw);
			data.br_stack = &cpuhw->bhrb_stack;
		}

		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	}
}

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;		// no valid instruction pointer
	else
		return regs->nip;
}

static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}
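
/*
 * A PMC has overflowed when bit 31 of its 32-bit value is set, i.e. the
 * value reads back as negative.
 */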
static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	return false;
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i, j;
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	unsigned long val[8];
	int found, active;
	int nmi;
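
	/*
	 * Limited counters (PMC5/6 on some processors) can't be frozen via
	 * MMCR0, so snapshot their values before handling the exception.
	 */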
	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		val[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	found = 0;
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(val[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't generate IRQs */
		/*
		 * We've found one that's overflowed. For active
		 * counters we need to log this. For inactive
		 * counters, we need to reset it anyway
		 */
		found = 1;
		active = 0;
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				active = 1;
				record_and_restart(event, val[i], regs);
				break;
			}
		}
		if (!active)
			/* reset non active counters that have overflowed */
			write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
				/* event has overflowed in a buggy way */
				found = 1;
				record_and_restart(event,
						   val[event->hw.idx - 1],
						   regs);
			}
		}
	}
	if (!found && !nmi && printk_ratelimit())
		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value. This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}
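
/*
 * CPU hotplug "prepare" callback: clear this CPU's PMU state and leave
 * all counters frozen (MMCR0_FC) until an event is scheduled in.
 */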
static int power_pmu_prepare_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (ppmu) {
		memset(cpuhw, 0, sizeof(*cpuhw));
		cpuhw->mmcr[0] = MMCR0_FC;
	}
	return 0;
}
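
/*
 * Register a CPU-specific PMU description with the generic perf core.
 * Only one backend can be registered at a time.
 */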
int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
			  power_pmu_prepare_cpu, NULL);
	return 0;
}