/* perf_event_intel_ds.c */

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/insn.h>

#include "perf_event.h"

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE
#define PEBS_FIXUP_SIZE		PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};

/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))

static const u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
	OP_LH | P(LVL, L1)  | P(SNOOP, NONE),	/* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | P(SNOOP, NONE),	/* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | P(SNOOP, NONE),	/* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | P(SNOOP, NONE),	/* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | P(SNOOP, MISS),	/* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | P(SNOOP, HIT),	/* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | P(SNOOP, HITM),	/* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HIT),  /* 0x08: L3 miss snoop hit */
	OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
	OP_LH | P(LVL, LOC_RAM)  | P(SNOOP, HIT),  /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | SNOOP_NONE_MISS,/* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | SNOOP_NONE_MISS,/* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | P(SNOOP, NONE),	/* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | P(SNOOP, NONE),	/* 0x0f: uncached */
};
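
/*
 * Decode the PEBS "precise store" data source word into a generic
 * perf_mem_data_src value: TLB hit/miss, L1 hit/miss and lock
 * information for the sampled store.
 */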
static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = store missed 2nd level TLB
	 *
	 * so it either hit the walker or the OS
	 * otherwise hit 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
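
/*
 * Haswell "data linear address" events: derive the memory operation from
 * the event's PEBS flags and, for the store events listed below, read the
 * L1D hit/miss result from bit 0 of the PEBS status word.
 */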
static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info only valid for following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}
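
/*
 * Decode the PEBS load-latency data source: bits 0-3 index the
 * pebs_data_source[] table above, bit 4 is the STLB miss bit and bit 5
 * the lock bit.  Nehalem (family 6, models 26/30/31/46) does not report
 * the TLB and lock bits, so they are marked not-available there.
 */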
static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;
	int model = boot_cpu_data.x86_model;
	int fam = boot_cpu_data.x86;

	dse.val = status;

	/*
	 * use the mapping table for bit 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not support TLB, Lock infos
	 */
	if (fam == 0x6 && (model == 26 || model == 30
	    || model == 31 || model == 46)) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}

	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort		  : 1,
		    rtm_abort		  : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry		  : 1,
		    data_conflict	  : 1,
		    capacity_writes	  : 1,
		    capacity_reads	  : 1;
	};
	u64	    value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL
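
/*
 * Point this CPU's IA32_DS_AREA MSR at its debug store, which holds the
 * BTS and PEBS buffer bases, indices and interrupt thresholds.
 */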
void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static DEFINE_PER_CPU(void *, insn_buffer);
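
/*
 * Allocate the per-CPU PEBS buffer and, on parts that do not report the
 * eventing IP in the record (pebs_format < 2), the scratch buffer used by
 * intel_pmu_pebs_fixup_ip() to copy and decode user code.  The interrupt
 * threshold is set to a single record.
 */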
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer, *ibuffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!ibuffer) {
			kfree(buffer);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = ibuffer;
	}

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}
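
/*
 * Allocate the per-CPU BTS buffer.  The interrupt threshold is placed
 * 1/16th of the buffer before the end, so the buffer is drained well
 * before it can overflow.
 */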
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}

static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}
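
/*
 * Allocate DS, BTS and PEBS buffers for all possible CPUs.  If either the
 * BTS or the PEBS allocations fail, that facility is released again and
 * left inactive; only when both fail is the DS area itself torn down.
 */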
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
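
/*
 * Flush whatever the hardware wrote into the BTS buffer and emit one perf
 * sample per branch record (from address in data.ip, to address in
 * data.addr), then reset the buffer index.
 */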
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at)))
		return 1;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),          /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),          /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),          /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),          /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};
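
/*
 * Find the PEBS constraint matching a precise event on this PMU.  Returns
 * NULL for events without precise_ip, the first matching constraint
 * (recording its flags on the event) otherwise, or the empty constraint
 * when nothing matches.
 */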
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &emptyconstraint;
}
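
/*
 * Account the event's counter in cpuc->pebs_enabled: load-latency events
 * also set the matching bit at idx + 32, precise-store events set bit 63.
 * The MSR itself is written by intel_pmu_pebs_enable_all() when the PMU
 * is (re)enabled.
 */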
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
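
/*
 * On parts where the PEBS assist is trap-like (intel_cap.pebs_trap), the
 * reported IP points past the sampling instruction.  Walk forward from
 * the last LBR "to" address, decoding instructions, until the reported IP
 * is reached; the previous instruction's address is the real sample IP.
 * Returns 1 when the IP could be rewound, 0 otherwise.
 */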
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PEBS_FIXUP_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	if (!kernel_ip(ip)) {
		int size, bytes;
		u8 *buf = this_cpu_read(insn_buffer);

		size = ip - to; /* Must fit our buffer, see above */
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
		if (bytes != 0)
			return 0;

		kaddr = buf;
	} else {
		kaddr = (void *)to;
	}

	do {
		struct insn insn;

		old_to = to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, is_64bit);
		insn_get_length(&insn);

		to += insn.length;
		kaddr += insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
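
/*
 * Haswell TSX helpers: the tsx_tuning field of the PEBS record carries
 * the cycle count of the last transactional block and the abort flags;
 * intel_hsw_transaction() additionally picks up the RTM XABORT code from
 * the AX register image.
 */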
static inline u64 intel_hsw_weight(struct pebs_record_hsw *pebs)
{
	if (pebs->tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
		return tsx.cycles_last_block;
	}
	return 0;
}

static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
{
	u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
		txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;

	return txn;
}
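
/*
 * Turn a single raw PEBS record into a perf sample: fill in weight, data
 * source, registers, data linear address and TSX information as requested
 * by the event's sample_type, fix up the sample IP, and push the sample
 * through perf_event_overflow().
 */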
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
#define PERF_X86_EVENT_PEBS_HSW_PREC \
		(PERF_X86_EVENT_PEBS_ST_HSW | \
		 PERF_X86_EVENT_PEBS_LD_HSW | \
		 PERF_X86_EVENT_PEBS_NA_HSW)
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
	 */
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_record_hsw *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;
	u64 sample_type;
	int fll, fst, dsrc;
	int fl = event->hw.flags;

	if (!intel_pmu_save_and_restart(event))
		return;

	sample_type = event->attr.sample_type;
	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;

	fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
	fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	perf_sample_data_init(&data, 0, event->hw.last_period);

	data.period = event->hw.last_period;

	/*
	 * Use latency for weight (only avail with PEBS-LL)
	 */
	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
		data.weight = pebs->lat;

	/*
	 * data.data_src encodes the data source
	 */
	if (dsrc) {
		u64 val = PERF_MEM_NA;
		if (fll)
			val = load_latency_data(pebs->dse);
		else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
			val = precise_datala_hsw(event, pebs->dse);
		else if (fst)
			val = precise_store_data(pebs->dse);
		data.data_src.val = val;
	}

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.flags = pebs->flags;
	set_linear_ip(&regs, pebs->ip);
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		regs.ax = pebs->ax;
		regs.bx = pebs->bx;
		regs.cx = pebs->cx;
		regs.dx = pebs->dx;
		regs.si = pebs->si;
		regs.di = pebs->di;
		regs.bp = pebs->bp;
		regs.sp = pebs->sp;

		regs.flags = pebs->flags;
#ifndef CONFIG_X86_32
		regs.r8 = pebs->r8;
		regs.r9 = pebs->r9;
		regs.r10 = pebs->r10;
		regs.r11 = pebs->r11;
		regs.r12 = pebs->r12;
		regs.r13 = pebs->r13;
		regs.r14 = pebs->r14;
		regs.r15 = pebs->r15;
#endif
	}

	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
		regs.ip = pebs->real_ip;
		regs.flags |= PERF_EFLAGS_EXACT;
	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if ((sample_type & PERF_SAMPLE_ADDR) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data.addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when no memory weight. */
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data.weight = intel_hsw_weight(pebs);

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data.txn = intel_hsw_transaction(pebs);
	}

	if (has_branch_stack(event))
		data.br_stack = &cpuc->lbr_stack;

	if (perf_event_overflow(event, &data, &regs))
		x86_pmu_stop(event, 0);
}
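
/*
 * PEBS format 0 (Core-era CPUs): only PMC0 can do PEBS, so drain the
 * buffer and report just the most recent record for that single event.
 */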
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}
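
/*
 * PEBS formats 1 and 2 (Nehalem and later): each record carries a status
 * bitmask of the counters that overflowed, so walk the buffer and match
 * every record to the first active precise event set in its status word.
 */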
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = NULL;
	void *at, *top;
	u64 status = 0;
	int bit;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	if (unlikely(at > top))
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ONCE(top - at > x86_pmu.max_pebs_events * x86_pmu.pebs_record_size,
		  "Unexpected number of pebs records %ld\n",
		  (long)(top - at) / x86_pmu.pebs_record_size);

	for (; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;

		for_each_set_bit(bit, (unsigned long *)&p->status,
				 x86_pmu.max_pebs_events) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= x86_pmu.max_pebs_events)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}

/*
 * BTS, PEBS probe and setup
 */

void __init intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 2:
			pr_cont("PEBS fmt2%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}
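
/*
 * Re-point MSR_IA32_DS_AREA at this CPU's debug store; used where the MSR
 * contents may have been lost, for instance on the suspend/resume path.
 */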
void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}