/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events count various software
 * occurrences in the kernel (and allow them to be profiled as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
	PERF_COUNT_SW_DUMMY			= 9,
	PERF_COUNT_SW_BPF_OUTPUT		= 10,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,
	PERF_SAMPLE_WEIGHT			= 1U << 14,
	PERF_SAMPLE_DATA_SRC			= 1U << 15,
	PERF_SAMPLE_IDENTIFIER			= 1U << 16,
	PERF_SAMPLE_TRANSACTION			= 1U << 17,
	PERF_SAMPLE_REGS_INTR			= 1U << 18,
	PERF_SAMPLE_PHYS_ADDR			= 1U << 19,

	PERF_SAMPLE_MAX				= 1U << 20,	/* non-ABI */
};

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
	PERF_SAMPLE_BRANCH_USER_SHIFT		= 0,  /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL_SHIFT		= 1,  /* kernel branches */
	PERF_SAMPLE_BRANCH_HV_SHIFT		= 2,  /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY_SHIFT		= 3,  /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT	= 4,  /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT	= 5,  /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT	= 6,  /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT	= 7,  /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX_SHIFT		= 8,  /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9,  /* not in transaction */
	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */

	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* call/ret stack */
	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */

	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT	= 14, /* no flags */
	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT	= 15, /* no cycles */

	PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT	= 16, /* save branch type */

	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
};

enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

	PERF_SAMPLE_BRANCH_NO_FLAGS	= 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
	PERF_SAMPLE_BRANCH_NO_CYCLES	= 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

	PERF_SAMPLE_BRANCH_TYPE_SAVE	= 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
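
/*
 * Example (illustrative): to capture any taken branch executed in user
 * mode, a profiler would set, alongside PERF_SAMPLE_BRANCH_STACK in
 * attr.sample_type:
 *
 *	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
 *				  PERF_SAMPLE_BRANCH_USER;
 */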
/*
 * Common flow change classification
 */
enum {
	PERF_BR_UNKNOWN		= 0,	/* unknown */
	PERF_BR_COND		= 1,	/* conditional */
	PERF_BR_UNCOND		= 2,	/* unconditional */
	PERF_BR_IND		= 3,	/* indirect */
	PERF_BR_CALL		= 4,	/* function call */
	PERF_BR_IND_CALL	= 5,	/* indirect function call */
	PERF_BR_RET		= 6,	/* function return */
	PERF_BR_SYSCALL		= 7,	/* syscall */
	PERF_BR_SYSRET		= 8,	/* syscall return */
	PERF_BR_COND_CALL	= 9,	/* conditional function call */
	PERF_BR_COND_RET	= 10,	/* conditional function return */
	PERF_BR_MAX,
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION	= (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION	= (1 << 1), /* From transaction */
	PERF_TXN_SYNC		= (1 << 2), /* Instruction is related */
	PERF_TXN_ASYNC		= (1 << 3), /* Instruction not related */
	PERF_TXN_RETRY		= (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT	= (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE	= (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ	= (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX		= (1 << 8), /* non-ABI */

	/* bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK	= (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT	= 32,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,
	PERF_FORMAT_GROUP		= 1U << 3,

	PERF_FORMAT_MAX			= 1U << 4,	/* non-ABI */
};
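
/*
 * Example (illustrative sketch): with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING, a
 * single (non-group) counter can be read and scaled to compensate for
 * multiplexing:
 *
 *	struct { __u64 value, time_enabled, time_running; } rf;
 *
 *	read(fd, &rf, sizeof(rf));
 *	if (rf.time_running)
 *		scaled = rf.value * rf.time_enabled / rf.time_running;
 */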
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4	104	/* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5	112	/* add: aux_watermark */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *		      should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */

				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */

				exclude_callchain_kernel : 1, /* exclude kernel callchains */
				exclude_callchain_user   : 1, /* exclude user callchains */
				mmap2          :  1, /* include mmap with inode data */
				comm_exec      :  1, /* flag comm events that are due to an exec */
				use_clockid    :  1, /* use @clockid for time fields */
				context_switch :  1, /* context switch data */
				write_backward :  1, /* Write ring buffer from end to beginning */
				namespaces     :  1, /* include namespaces data */
				__reserved_1   : 35;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	__s32	clockid;
	/*
	 * Defines set of regs to dump for each sample
	 * state captured on:
	 *  - precise = 0: PMU interrupt
	 *  - precise > 0: sampled instruction
	 *
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_intr;

	/*
	 * Wakeup watermark for AUX area
	 */
	__u32	aux_watermark;
	__u16	sample_max_stack;
	__u16	__reserved_2;	/* align to __u64 */
};
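
/*
 * Example (illustrative sketch): glibc provides no wrapper for
 * perf_event_open(2), so callers typically go through syscall(2) (needs
 * <unistd.h> and <sys/syscall.h>). Counting instructions in the calling
 * process:
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type           = PERF_TYPE_HARDWARE;
 *	attr.size           = sizeof(attr);
 *	attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled       = 1;
 *	attr.exclude_kernel = 1;
 *	attr.exclude_hv     = 1;
 *
 *	int fd = syscall(__NR_perf_event_open, &attr,
 *			 0, -1, -1, 0);	# pid=self, cpu=any, no group, no flags
 */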
#define perf_flags(attr)	(*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
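
/*
 * Example (illustrative): a typical counting sequence resets and enables
 * the event around the measured region, then reads the count (the layout
 * of the read() buffer depends on attr.read_format, see above):
 *
 *	__u64 count;
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	# ... code being measured ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */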
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, index, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier()
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_user_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     index = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_user_rdpmc && index) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(index - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */
	union {
		__u64	capabilities;
		struct {
			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
				cap_user_time		: 1, /* The time_* fields are used */
				cap_user_time_zero	: 1, /* The time_zero field is used */
				cap_____res		: 59;
		};
	};

	/*
	 * If cap_user_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_user_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot  = (cyc >> time_shift);
	 *   rem   = cyc & (((u64)1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *           ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if index), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (index)
	 *     running += delta;
	 *
	 *   quot  = count / running;
	 *   rem   = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;
	/*
	 * If cap_user_time_zero, the hardware clock (e.g. TSC) can be
	 * calculated from sample timestamps.
	 *
	 *   time = timestamp - time_zero;
	 *   quot = time / time_mult;
	 *   rem  = time % time_mult;
	 *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
	 *
	 * And vice versa:
	 *
	 *   quot      = cyc >> time_shift;
	 *   rem       = cyc & (((u64)1 << time_shift) - 1);
	 *   timestamp = time_zero + quot * time_mult +
	 *               ((rem * time_mult) >> time_shift);
	 */
	__u64	time_zero;
	__u32	size;			/* Header size up to __reserved[] fields. */

	/*
	 * Hole for extension of the self monitor capabilities
	 */
	__u8	__reserved[118*8+4];	/* align to 1k. */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an smp_rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data, after issuing
	 * an smp_mb() to separate the data read from the ->data_tail store.
	 * In this case the kernel will not over-write unread data.
	 *
	 * See perf_output_put_handle() for the data ordering.
	 *
	 * data_{offset,size} indicate the location and size of the perf record
	 * buffer within the mmapped area.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
	__u64	data_offset;		/* where the buffer starts */
	__u64	data_size;		/* data buffer size */

	/*
	 * AUX area is defined by aux_{offset,size} fields that should be set
	 * by userspace, so that
	 *
	 *   aux_offset >= data_offset + data_size
	 *
	 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
	 *
	 * Ring buffer pointers aux_{head,tail} have the same semantics as
	 * data_{head,tail} and same ordering rules apply.
	 */
	__u64	aux_head;
	__u64	aux_tail;
	__u64	aux_offset;
	__u64	aux_size;
};
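
/*
 * Example (illustrative sketch): consuming records from the mmap()ed data
 * buffer using the ordering rules above. __sync_synchronize() stands in
 * for the smp_rmb()/smp_mb() barriers, the data area is a power-of-two
 * size, and wrap-around of a record across the buffer end is left out
 * for brevity:
 *
 *	struct perf_event_mmap_page *pc = mapped_base;
 *	char *data = (char *)mapped_base + pc->data_offset;
 *	__u64 head, tail = pc->data_tail;
 *
 *	head = pc->data_head;
 *	__sync_synchronize();	# read records only after reading data_head
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr = (void *)
 *			(data + (tail & (pc->data_size - 1)));
 *		# ... consume hdr->size bytes of record ...
 *		tail += hdr->size;
 *	}
 *
 *	__sync_synchronize();	# finish reading before publishing tail
 *	pc->data_tail = tail;
 */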
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
/*
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events so can reuse the same bit position.
 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

struct perf_ns_link_info {
	__u64	dev;
	__u64	ino;
};

enum {
	NET_NS_INDEX		= 0,
	UTS_NS_INDEX		= 1,
	IPC_NS_INDEX		= 2,
	PID_NS_INDEX		= 3,
	USER_NS_INDEX		= 4,
	MNT_NS_INDEX		= 5,
	CGROUP_NS_INDEX		= 6,

	NR_NAMESPACES,		/* number of available namespaces */
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below. They are stashed
	 * just after the perf_event_header and the fields already present in
	 * the record, i.e. at the end of the payload. That way a newer
	 * perf.data file will be supported by older perf tools, with these new
	 * optional fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			id;       } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	#
	 *
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size];}&& PERF_SAMPLE_RAW
	 *
	 *	{ u64			nr;
	 *	  { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64			size;
	 *	  char			data[size];
	 *	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ u64			weight;   } && PERF_SAMPLE_WEIGHT
	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
	 *	{ u64			phys_addr;} && PERF_SAMPLE_PHYS_ADDR
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	/*
	 * The MMAP2 records are an augmented version of MMAP; they add
	 * maj, min and ino numbers to uniquely identify each mapping.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	u32				maj;
	 *	u32				min;
	 *	u64				ino;
	 *	u64				ino_generation;
	 *	u32				prot, flags;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2			= 10,

	/*
	 * Records that new data landed in the AUX buffer part.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				aux_offset;
	 *	u64				aux_size;
	 *	u64				flags;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX				= 11,

	/*
	 * Indicates that instruction trace has started
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 * };
	 */
	PERF_RECORD_ITRACE_START		= 12,

	/*
	 * Records the dropped/lost sample number.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST_SAMPLES		= 13,

	/*
	 * Records a context switch in or out (flagged by
	 * PERF_RECORD_MISC_SWITCH_OUT). See also
	 * PERF_RECORD_SWITCH_CPU_WIDE.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH			= 14,

	/*
	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
	 * next_prev_tid that are the next (switching out) or previous
	 * (switching in) pid/tid.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				next_prev_pid;
	 *	u32				next_prev_tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH_CPU_WIDE		= 15,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid;
	 *	u32				tid;
	 *	u64				nr_namespaces;
	 *	{ u64				dev, inode; } [nr_namespaces];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_NAMESPACES			= 16,

	PERF_RECORD_MAX,			/* non-ABI */
};
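
/*
 * Example (illustrative): a consumer walking the data buffer typically
 * dispatches on perf_event_header::type, e.g.:
 *
 *	switch (hdr->type) {
 *	case PERF_RECORD_SAMPLE:
 *		# parse the fields selected by attr.sample_type, in the
 *		# order shown above
 *		break;
 *	case PERF_RECORD_MMAP2:
 *		# update the pid's address-space map for symbolization
 *		break;
 *	case PERF_RECORD_LOST:
 *		# account for 'lost' dropped events
 *		break;
 *	}
 */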
#define PERF_MAX_STACK_DEPTH		127
#define PERF_MAX_CONTEXTS_PER_STACK	  8

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
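
/*
 * Example (illustrative): the PERF_CONTEXT_* values appear inline in the
 * PERF_SAMPLE_CALLCHAIN ips[] array as markers separating regions, so a
 * consumer typically switches symbol context when it sees one:
 *
 *	for (i = 0; i < nr; i++) {
 *		if (ips[i] >= PERF_CONTEXT_MAX)
 *			# switch kernel/user/guest context
 *		else
 *			# resolve ips[i] as an address in the
 *			# current context
 *	}
 */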
/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE		0x02	/* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL		0x04	/* record contains gaps */
#define PERF_AUX_FLAG_COLLISION		0x08	/* sample collided with another */

#define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
#define PERF_FLAG_FD_OUTPUT		(1UL << 1)
#define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_op:5,	/* type of opcode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_snoop:5,	/* snoop mode */
			mem_lock:2,	/* lock instr */
			mem_dtlb:7,	/* tlb access */
			mem_lvl_num:4,	/* memory hierarchy level number */
			mem_remote:1,	/* remote */
			mem_snoopx:2,	/* snoop mode, ext */
			mem_rsvd:24;
	};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_rsvd:24,
			mem_snoopx:2,	/* snoop mode, ext */
			mem_remote:1,	/* remote */
			mem_lvl_num:4,	/* memory hierarchy level number */
			mem_dtlb:7,	/* tlb access */
			mem_lock:2,	/* lock instr */
			mem_snoop:5,	/* snoop mode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_op:5;	/* type of opcode */
	};
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch, code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA		0x01   /* not available */
#define PERF_MEM_LVL_HIT	0x02   /* hit level */
#define PERF_MEM_LVL_MISS	0x04   /* miss level */
#define PERF_MEM_LVL_L1		0x08   /* L1 */
#define PERF_MEM_LVL_LFB	0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20   /* L2 */
#define PERF_MEM_LVL_L3		0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

#define PERF_MEM_REMOTE_REMOTE	0x01 /* Remote */
#define PERF_MEM_REMOTE_SHIFT	37

#define PERF_MEM_LVLNUM_L1	0x01 /* L1 */
#define PERF_MEM_LVLNUM_L2	0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3	0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4	0x04 /* L4 */
/* 5-0xa available */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB	0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM	0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM	0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA	0x0f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT	33

/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

#define PERF_MEM_SNOOPX_FWD	0x01 /* forward */
/* 1 free */
#define PERF_MEM_SNOOPX_SHIFT	37

/* locked instruction */
#define PERF_MEM_LOCK_NA	0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x01 /* not available */
#define PERF_MEM_TLB_HIT	0x02 /* hit level */
#define PERF_MEM_TLB_MISS	0x04 /* miss level */
#define PERF_MEM_TLB_L1		0x08 /* L1 */
#define PERF_MEM_TLB_L2		0x10 /* L2 */
#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
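
/*
 * Example (illustrative): PERF_MEM_S() builds the shifted form of one
 * field value, so a PERF_SAMPLE_DATA_SRC qualifier can be tested like:
 *
 *	union perf_mem_data_src dsrc = { .val = sample_data_src };
 *
 *	if (dsrc.val & PERF_MEM_S(LVL, MISS))
 *		# the access missed; check dsrc.mem_lvl for the level
 */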
/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional; if it is not
 * supported, mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		in_tx:1,    /* in transaction */
		abort:1,    /* transaction abort */
		cycles:16,  /* cycle count to last branch */
		type:4,     /* branch type */
		reserved:40;
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */