/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE   = 0,
        PERF_TYPE_SOFTWARE   = 1,
        PERF_TYPE_TRACEPOINT = 2,
        PERF_TYPE_HW_CACHE   = 3,
        PERF_TYPE_RAW        = 4,
        PERF_TYPE_BREAKPOINT = 5,

        PERF_TYPE_MAX,       /* non-ABI */
};
/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES              = 0,
        PERF_COUNT_HW_INSTRUCTIONS            = 1,
        PERF_COUNT_HW_CACHE_REFERENCES        = 2,
        PERF_COUNT_HW_CACHE_MISSES            = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS     = 4,
        PERF_COUNT_HW_BRANCH_MISSES           = 5,
        PERF_COUNT_HW_BUS_CYCLES              = 6,
        PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
        PERF_COUNT_HW_STALLED_CYCLES_BACKEND  = 8,
        PERF_COUNT_HW_REF_CPU_CYCLES          = 9,

        PERF_COUNT_HW_MAX,                    /* non-ABI */
};
/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D  = 0,
        PERF_COUNT_HW_CACHE_L1I  = 1,
        PERF_COUNT_HW_CACHE_LL   = 2,
        PERF_COUNT_HW_CACHE_DTLB = 3,
        PERF_COUNT_HW_CACHE_ITLB = 4,
        PERF_COUNT_HW_CACHE_BPU  = 5,
        PERF_COUNT_HW_CACHE_NODE = 6,

        PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ     = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE    = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,     /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS   = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,   /* non-ABI */
};
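
/*
 * The three enums above are not used directly in attr.config; per the
 * perf_event_open(2) man page they are packed together for
 * PERF_TYPE_HW_CACHE events as
 *
 *      config = (perf_hw_cache_id) |
 *               (perf_hw_cache_op_id << 8) |
 *               (perf_hw_cache_op_result_id << 16);
 *
 * e.g. counting L1-D read misses:
 *
 *      attr.type   = PERF_TYPE_HW_CACHE;
 *      attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                    (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */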
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow the profiling
 * of them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK        = 0,
        PERF_COUNT_SW_TASK_CLOCK       = 1,
        PERF_COUNT_SW_PAGE_FAULTS      = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS   = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN  = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ  = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
        PERF_COUNT_SW_EMULATION_FAULTS = 8,
        PERF_COUNT_SW_DUMMY            = 9,

        PERF_COUNT_SW_MAX,             /* non-ABI */
};
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP           = 1U << 0,
        PERF_SAMPLE_TID          = 1U << 1,
        PERF_SAMPLE_TIME         = 1U << 2,
        PERF_SAMPLE_ADDR         = 1U << 3,
        PERF_SAMPLE_READ         = 1U << 4,
        PERF_SAMPLE_CALLCHAIN    = 1U << 5,
        PERF_SAMPLE_ID           = 1U << 6,
        PERF_SAMPLE_CPU          = 1U << 7,
        PERF_SAMPLE_PERIOD       = 1U << 8,
        PERF_SAMPLE_STREAM_ID    = 1U << 9,
        PERF_SAMPLE_RAW          = 1U << 10,
        PERF_SAMPLE_BRANCH_STACK = 1U << 11,
        PERF_SAMPLE_REGS_USER    = 1U << 12,
        PERF_SAMPLE_STACK_USER   = 1U << 13,
        PERF_SAMPLE_WEIGHT       = 1U << 14,
        PERF_SAMPLE_DATA_SRC     = 1U << 15,
        PERF_SAMPLE_IDENTIFIER   = 1U << 16,
        PERF_SAMPLE_TRANSACTION  = 1U << 17,
        PERF_SAMPLE_REGS_INTR    = 1U << 18,

        PERF_SAMPLE_MAX          = 1U << 19, /* non-ABI */
};
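
/*
 * A sketch of a sampling configuration: request the instruction pointer,
 * thread id and timestamp with every overflow; the selected fields are
 * laid out in PERF_RECORD_SAMPLE (below) in the bit order of this enum:
 *
 *      attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *                           PERF_SAMPLE_TIME;
 *      attr.sample_period = 100000;
 */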
/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined; however, BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
        PERF_SAMPLE_BRANCH_USER_SHIFT       = 0,  /* user branches */
        PERF_SAMPLE_BRANCH_KERNEL_SHIFT     = 1,  /* kernel branches */
        PERF_SAMPLE_BRANCH_HV_SHIFT         = 2,  /* hypervisor branches */

        PERF_SAMPLE_BRANCH_ANY_SHIFT        = 3,  /* any branch types */
        PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT   = 4,  /* any call branch */
        PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5,  /* any return branch */
        PERF_SAMPLE_BRANCH_IND_CALL_SHIFT   = 6,  /* indirect calls */
        PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT   = 7,  /* transaction aborts */
        PERF_SAMPLE_BRANCH_IN_TX_SHIFT      = 8,  /* in transaction */
        PERF_SAMPLE_BRANCH_NO_TX_SHIFT      = 9,  /* not in transaction */
        PERF_SAMPLE_BRANCH_COND_SHIFT       = 10, /* conditional branches */
        PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */

        PERF_SAMPLE_BRANCH_MAX_SHIFT        /* non-ABI */
};

enum perf_branch_sample_type {
        PERF_SAMPLE_BRANCH_USER       = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
        PERF_SAMPLE_BRANCH_KERNEL     = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
        PERF_SAMPLE_BRANCH_HV         = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

        PERF_SAMPLE_BRANCH_ANY        = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_CALL   = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
        PERF_SAMPLE_BRANCH_IND_CALL   = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ABORT_TX   = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
        PERF_SAMPLE_BRANCH_IN_TX      = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
        PERF_SAMPLE_BRANCH_NO_TX      = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
        PERF_SAMPLE_BRANCH_COND       = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
        PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,

        PERF_SAMPLE_BRANCH_MAX        = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
#define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER|\
         PERF_SAMPLE_BRANCH_KERNEL|\
         PERF_SAMPLE_BRANCH_HV)
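
/*
 * A sketch of requesting a stack of recent user-space call branches with
 * each sample; only the priv level bits given here (BRANCH_USER) are
 * permission-checked, independent of the event's own priv level:
 *
 *      attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *      attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *                                 PERF_SAMPLE_BRANCH_ANY_CALL;
 */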
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
        PERF_SAMPLE_REGS_ABI_NONE = 0,
        PERF_SAMPLE_REGS_ABI_32   = 1,
        PERF_SAMPLE_REGS_ABI_64   = 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
        PERF_TXN_ELISION        = (1 << 0), /* From elision */
        PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
        PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
        PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
        PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
        PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
        PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
        PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

        PERF_TXN_MAX            = (1 << 8), /* non-ABI */

        /* bits 32..63 are reserved for the abort code */
        PERF_TXN_ABORT_MASK     = (0xffffffffULL << 32),
        PERF_TXN_ABORT_SHIFT    = 32,
};
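
/*
 * A sketch of decoding a PERF_SAMPLE_TRANSACTION value (txn here stands
 * for the u64 taken from the sample): the low bits carry the qualifier
 * flags above, and the architecture-specific abort code occupies
 * bits 32..63:
 *
 *      __u64 txn = ...;
 *      __u32 abort_code;
 *
 *      if (txn & PERF_TXN_ELISION)
 *              ... abort happened during lock elision ...
 *      abort_code = (txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 */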
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64 value;
 *        { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64 id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64 nr;
 *        { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64 value;
 *          { u64 id;         } && PERF_FORMAT_ID
 *        } cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
        PERF_FORMAT_ID                 = 1U << 2,
        PERF_FORMAT_GROUP              = 1U << 3,

        PERF_FORMAT_MAX                = 1U << 4, /* non-ABI */
};
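
/*
 * A sketch of reading one event opened with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING and
 * without PERF_FORMAT_GROUP, scaling the count when the event was only
 * scheduled part of the time (layout as in the pseudo-struct above;
 * the scaling step is the usual value * enabled / running estimate,
 * not something this header mandates):
 *
 *      struct {
 *              __u64 value;
 *              __u64 time_enabled;
 *              __u64 time_running;
 *      } rf;
 *
 *      read(fd, &rf, sizeof(rf));
 *      if (rf.time_running && rf.time_running < rf.time_enabled)
 *              rf.value = rf.value * rf.time_enabled / rf.time_running;
 */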
#define PERF_ATTR_SIZE_VER0 64  /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1 72  /* add: config2 */
#define PERF_ATTR_SIZE_VER2 80  /* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3 96  /* add: sample_regs_user */
                                /* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32 type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32 size;

        /*
         * Type specific configuration information.
         */
        __u64 config;

        union {
                __u64 sample_period;
                __u64 sample_freq;
        };

        __u64 sample_type;
        __u64 read_format;

        __u64 disabled       : 1, /* off by default */
              inherit        : 1, /* children inherit it */
              pinned         : 1, /* must always be on PMU */
              exclusive      : 1, /* only group on PMU */
              exclude_user   : 1, /* don't count user */
              exclude_kernel : 1, /* ditto kernel */
              exclude_hv     : 1, /* ditto hypervisor */
              exclude_idle   : 1, /* don't count when idle */
              mmap           : 1, /* include mmap data */
              comm           : 1, /* include comm data */
              freq           : 1, /* use freq, not period */
              inherit_stat   : 1, /* per task counts */
              enable_on_exec : 1, /* next exec enables */
              task           : 1, /* trace fork/exit */
              watermark      : 1, /* wakeup_watermark */
              /*
               * precise_ip:
               *
               *  0 - SAMPLE_IP can have arbitrary skid
               *  1 - SAMPLE_IP must have constant skid
               *  2 - SAMPLE_IP requested to have 0 skid
               *  3 - SAMPLE_IP must have 0 skid
               *
               * See also PERF_RECORD_MISC_EXACT_IP
               */
              precise_ip     : 2, /* skid constraint */
              mmap_data      : 1, /* non-exec mmap data */
              sample_id_all  : 1, /* sample_type all events */
              exclude_host   : 1, /* don't count in host */
              exclude_guest  : 1, /* don't count in guest */
              exclude_callchain_kernel : 1, /* exclude kernel callchains */
              exclude_callchain_user   : 1, /* exclude user callchains */
              mmap2          : 1, /* include mmap with inode data */
              comm_exec      : 1, /* flag comm events that are due to an exec */
              use_clockid    : 1, /* use @clockid for time fields */
              __reserved_1   : 38;

        union {
                __u32 wakeup_events;    /* wakeup every n events */
                __u32 wakeup_watermark; /* bytes before wakeup */
        };

        __u32 bp_type;
        union {
                __u64 bp_addr;
                __u64 config1; /* extension of config */
        };
        union {
                __u64 bp_len;
                __u64 config2; /* extension of config1 */
        };
        __u64 branch_sample_type; /* enum perf_branch_sample_type */

        /*
         * Defines set of user regs to dump on samples.
         * See asm/perf_regs.h for details.
         */
        __u64 sample_regs_user;

        /*
         * Defines size of the user stack to dump on samples.
         */
        __u32 sample_stack_user;

        __s32 clockid;
        /*
         * Defines set of regs to dump for each sample
         * state captured on:
         *  - precise = 0: PMU interrupt
         *  - precise > 0: sampled instruction
         *
         * See asm/perf_regs.h for details.
         */
        __u64 sample_regs_intr;

        /*
         * Wakeup watermark for AUX area
         */
        __u32 aux_watermark;
        __u32 __reserved_2; /* align to __u64 */
};
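
/*
 * A minimal usage sketch: glibc provides no wrapper for
 * sys_perf_event_open(), so the raw syscall is used (see
 * perf_event_open(2); the arguments after attr are pid, cpu, group_fd
 * and flags, where pid == 0 means the calling thread, cpu == -1 means
 * any CPU and group_fd == -1 means no group). This counts user-space
 * instructions of the calling thread:
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/perf_event.h>
 *
 *      struct perf_event_attr attr;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type           = PERF_TYPE_HARDWARE;
 *      attr.size           = sizeof(attr);
 *      attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *      attr.disabled       = 1;
 *      attr.exclude_kernel = 1;
 *      attr.exclude_hv     = 1;
 *
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */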
#define perf_flags(attr) (*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE     _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE    _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH    _IO ('$', 2)
#define PERF_EVENT_IOC_RESET      _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD     _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID         _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF    _IOW('$', 8, __u32)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP = 1U << 0,
};
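
/*
 * A typical counting session around a measured region (with
 * read_format == 0 the read() returns a single u64); passing
 * PERF_IOC_FLAG_GROUP as the ioctl argument applies the operation to
 * all events in the group instead of just one:
 *
 *      __u64 count;
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      ... run the workload being measured ...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 */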
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32 version;        /* version number of this structure */
        __u32 compat_version; /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq, time_mult, time_shift, index, width;
         *   u64 count, enabled, running;
         *   u64 cyc, time_offset;
         *   s64 pmc = 0;
         *
         *   do {
         *     seq = pc->lock;
         *     barrier();
         *
         *     enabled = pc->time_enabled;
         *     running = pc->time_running;
         *
         *     if (pc->cap_user_time && enabled != running) {
         *       cyc = rdtsc();
         *       time_offset = pc->time_offset;
         *       time_mult   = pc->time_mult;
         *       time_shift  = pc->time_shift;
         *     }
         *
         *     index = pc->index;
         *     count = pc->offset;
         *     if (pc->cap_user_rdpmc && index) {
         *       width = pc->pmc_width;
         *       pmc = rdpmc(index - 1);
         *     }
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         * processes.
         */
        __u32 lock;         /* seqlock for synchronization */
        __u32 index;        /* hardware event identifier */
        __s64 offset;       /* add to hardware event value */
        __u64 time_enabled; /* time event active */
        __u64 time_running; /* time event on cpu */
        union {
                __u64 capabilities;
                struct {
                        __u64 cap_bit0               : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
                              cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
                              cap_user_rdpmc         : 1, /* The RDPMC instruction can be used to read counts */
                              cap_user_time          : 1, /* The time_* fields are used */
                              cap_user_time_zero     : 1, /* The time_zero field is used */
                              cap_____res            : 59;
                };
        };

        /*
         * If cap_user_rdpmc this field provides the bit-width of the value
         * read using the rdpmc() or equivalent instruction. This can be used
         * to sign extend the result like:
         *
         *   pmc <<= 64 - width;
         *   pmc >>= 64 - width; // signed shift right
         *   count += pmc;
         */
        __u16 pmc_width;

        /*
         * If cap_user_time the below fields can be used to compute the time
         * delta since time_enabled (in ns) using rdtsc or similar.
         *
         *   u64 quot, rem;
         *   u64 delta;
         *
         *   quot  = (cyc >> time_shift);
         *   rem   = cyc & ((1 << time_shift) - 1);
         *   delta = time_offset + quot * time_mult +
         *           ((rem * time_mult) >> time_shift);
         *
         * Where time_offset, time_mult, time_shift and cyc are read in the
         * seqcount loop described above. This delta can then be added to
         * enabled and possibly running (if index), improving the scaling:
         *
         *   enabled += delta;
         *   if (index)
         *     running += delta;
         *
         *   quot  = count / running;
         *   rem   = count % running;
         *   count = quot * enabled + (rem * enabled) / running;
         */
        __u16 time_shift;
        __u32 time_mult;
        __u64 time_offset;
        /*
         * If cap_user_time_zero, the hardware clock (e.g. TSC) can be
         * calculated from sample timestamps.
         *
         *   time = timestamp - time_zero;
         *   quot = time / time_mult;
         *   rem  = time % time_mult;
         *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
         *
         * And vice versa:
         *
         *   quot      = cyc >> time_shift;
         *   rem       = cyc & ((1 << time_shift) - 1);
         *   timestamp = time_zero + quot * time_mult +
         *               ((rem * time_mult) >> time_shift);
         */
        __u64 time_zero;
        __u32 size; /* Header size up to __reserved[] fields. */

        /*
         * Hole for extension of the self monitor capabilities
         */
        __u8 __reserved[118*8+4]; /* align to 1k. */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an smp_rmb(),
         * after reading this value.
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data, after issuing
         * an smp_mb() to separate the data read from the ->data_tail store.
         * In this case the kernel will not over-write unread data.
         *
         * See perf_output_put_handle() for the data ordering.
         *
         * data_{offset,size} indicate the location and size of the perf record
         * buffer within the mmapped area.
         */
        __u64 data_head;   /* head in the data section */
        __u64 data_tail;   /* user-space written tail */
        __u64 data_offset; /* where the buffer starts */
        __u64 data_size;   /* data buffer size */

        /*
         * AUX area is defined by aux_{offset,size} fields that should be set
         * by the userspace, so that
         *
         *   aux_offset >= data_offset + data_size
         *
         * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
         *
         * Ring buffer pointers aux_{head,tail} have the same semantics as
         * data_{head,tail} and the same ordering rules apply.
         */
        __u64 aux_head;
        __u64 aux_tail;
        __u64 aux_offset;
        __u64 aux_size;
};
#define PERF_RECORD_MISC_CPUMODE_MASK    (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL          (1 << 0)
#define PERF_RECORD_MISC_USER            (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR      (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL    (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER      (5 << 0)

/*
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events, so they can reuse the same bit position.
 */
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)

struct perf_event_header {
        __u32 type;
        __u16 misc;
        __u16 size;
};
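
/*
 * A sketch of draining the mmap()ed data area from user-space, following
 * the data_{head,tail} ordering rules described in struct
 * perf_event_mmap_page above ("base" is the start of the mapping, "mp"
 * points at the header page; the GCC/Clang __atomic builtins stand in
 * for smp_rmb()/smp_mb(), and records wrapping around the ring edge
 * would need an extra copy, omitted here):
 *
 *      __u64 head = __atomic_load_n(&mp->data_head, __ATOMIC_ACQUIRE);
 *      __u64 tail = mp->data_tail;
 *
 *      while (tail < head) {
 *              struct perf_event_header *hdr = (struct perf_event_header *)
 *                      (base + mp->data_offset + (tail % mp->data_size));
 *
 *              ... dispatch on hdr->type, consume hdr->size bytes ...
 *              tail += hdr->size;
 *      }
 *      __atomic_store_n(&mp->data_tail, tail, __ATOMIC_RELEASE);
 */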
enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * have the sample_type selected fields related to where/when
         * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
         * IDENTIFIER) described in PERF_RECORD_SAMPLE below. These fields
         * are stashed just after the perf_event_header and the fields already
         * present for the existing record types, i.e. at the end of the
         * payload. That way a newer perf.data file will be supported by
         * older perf tools, with these new optional fields being ignored.
         *
         * struct sample_id {
         *      { u32 pid, tid;  } && PERF_SAMPLE_TID
         *      { u64 time;      } && PERF_SAMPLE_TIME
         *      { u64 id;        } && PERF_SAMPLE_ID
         *      { u64 stream_id; } && PERF_SAMPLE_STREAM_ID
         *      { u32 cpu, res;  } && PERF_SAMPLE_CPU
         *      { u64 id;        } && PERF_SAMPLE_IDENTIFIER
         * } && perf_event_attr::sample_id_all
         *
         * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
         * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
         * relative to header.size.
         */
        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u32 pid, tid;
         *      u64 addr;
         *      u64 len;
         *      u64 pgoff;
         *      char filename[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_MMAP = 1,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u64 id;
         *      u64 lost;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_LOST = 2,

        /*
         * struct {
         *      struct perf_event_header header;
         *
         *      u32 pid, tid;
         *      char comm[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_COMM = 3,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid, ppid;
         *      u32 tid, ptid;
         *      u64 time;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_EXIT = 4,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u64 time;
         *      u64 id;
         *      u64 stream_id;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_THROTTLE   = 5,
        PERF_RECORD_UNTHROTTLE = 6,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid, ppid;
         *      u32 tid, ptid;
         *      u64 time;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_FORK = 7,

        /*
         * struct {
         *      struct perf_event_header header;
         *      u32 pid, tid;
         *
         *      struct read_format values;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_READ = 8,

        /*
         * struct {
         *      struct perf_event_header header;
         *
         *      #
         *      # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
         *      # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
         *      # is fixed relative to header.
         *      #
         *
         *      { u64 id;        } && PERF_SAMPLE_IDENTIFIER
         *      { u64 ip;        } && PERF_SAMPLE_IP
         *      { u32 pid, tid;  } && PERF_SAMPLE_TID
         *      { u64 time;      } && PERF_SAMPLE_TIME
         *      { u64 addr;      } && PERF_SAMPLE_ADDR
         *      { u64 id;        } && PERF_SAMPLE_ID
         *      { u64 stream_id; } && PERF_SAMPLE_STREAM_ID
         *      { u32 cpu, res;  } && PERF_SAMPLE_CPU
         *      { u64 period;    } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format values; } && PERF_SAMPLE_READ
         *
         *      { u64 nr,
         *        u64 ips[nr];   } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data with respect to the ABI.
         *      #
         *      # That is, the ABI doesn't make any promises with respect to
         *      # the stability of its content; it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32 size;
         *        char data[size]; } && PERF_SAMPLE_RAW
         *
         *      { u64 nr;
         *        { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
         *
         *      { u64 abi; # enum perf_sample_regs_abi
         *        u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
         *
         *      { u64 size;
         *        char data[size];
         *        u64 dyn_size;  } && PERF_SAMPLE_STACK_USER
         *
         *      { u64 weight;      } && PERF_SAMPLE_WEIGHT
         *      { u64 data_src;    } && PERF_SAMPLE_DATA_SRC
         *      { u64 transaction; } && PERF_SAMPLE_TRANSACTION
         *      { u64 abi; # enum perf_sample_regs_abi
         *        u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
         * };
         */
        PERF_RECORD_SAMPLE = 9,

        /*
         * The MMAP2 records are an augmented version of MMAP; they add
         * maj, min and ino numbers to uniquely identify each mapping.
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u32 pid, tid;
         *      u64 addr;
         *      u64 len;
         *      u64 pgoff;
         *      u32 maj;
         *      u32 min;
         *      u64 ino;
         *      u64 ino_generation;
         *      u32 prot, flags;
         *      char filename[];
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_MMAP2 = 10,

        /*
         * Records that new data landed in the AUX buffer part.
         *
         * struct {
         *      struct perf_event_header header;
         *
         *      u64 aux_offset;
         *      u64 aux_size;
         *      u64 flags;
         *      struct sample_id sample_id;
         * };
         */
        PERF_RECORD_AUX = 11,

        /*
         * Indicates that instruction trace has started
         *
         * struct {
         *      struct perf_event_header header;
         *      u32 pid;
         *      u32 tid;
         * };
         */
        PERF_RECORD_ITRACE_START = 12,

        PERF_RECORD_MAX, /* non-ABI */
};
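
/*
 * A sketch of what a PERF_RECORD_SAMPLE payload reduces to for a fixed
 * sample_type of PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
 * the selected fields simply appear back to back in the order listed
 * above:
 *
 *      struct {
 *              struct perf_event_header header;
 *              u64 ip;         && PERF_SAMPLE_IP
 *              u32 pid, tid;   && PERF_SAMPLE_TID
 *              u64 time;       && PERF_SAMPLE_TIME
 *      };
 */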
#define PERF_MAX_STACK_DEPTH 127

enum perf_callchain_context {
        PERF_CONTEXT_HV           = (__u64)-32,
        PERF_CONTEXT_KERNEL       = (__u64)-128,
        PERF_CONTEXT_USER         = (__u64)-512,

        PERF_CONTEXT_GUEST        = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER   = (__u64)-2560,

        PERF_CONTEXT_MAX          = (__u64)-4095,
};
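
/*
 * These context values appear inline in the PERF_SAMPLE_CALLCHAIN ips[]
 * array as markers separating e.g. kernel from user frames; they sit at
 * the very top of the u64 range precisely so that they do not collide
 * with real instruction addresses. A sketch of walking such a callchain:
 *
 *      __u64 context = PERF_CONTEXT_MAX;
 *
 *      for (i = 0; i < nr; i++) {
 *              if (ips[i] >= PERF_CONTEXT_MAX)
 *                      context = ips[i];
 *              else
 *                      ... ips[i] is a return address within "context" ...
 *      }
 */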
/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */

#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT   (1UL << 1)
#define PERF_FLAG_PID_CGROUP  (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC  (1UL << 3) /* O_CLOEXEC */

union perf_mem_data_src {
        __u64 val;
        struct {
                __u64 mem_op:5,    /* type of opcode */
                      mem_lvl:14,  /* memory hierarchy level */
                      mem_snoop:5, /* snoop mode */
                      mem_lock:2,  /* lock instr */
                      mem_dtlb:7,  /* tlb access */
                      mem_rsvd:31;
        };
};
/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA     0x01 /* not available */
#define PERF_MEM_OP_LOAD   0x02 /* load instruction */
#define PERF_MEM_OP_STORE  0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
#define PERF_MEM_OP_EXEC   0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT  0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA       0x01   /* not available */
#define PERF_MEM_LVL_HIT      0x02   /* hit level */
#define PERF_MEM_LVL_MISS     0x04   /* miss level */
#define PERF_MEM_LVL_L1       0x08   /* L1 */
#define PERF_MEM_LVL_LFB      0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2       0x20   /* L2 */
#define PERF_MEM_LVL_L3       0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM  0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1 0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2 0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1 0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2 0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO       0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC      0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT    5

/* snoop mode */
#define PERF_MEM_SNOOP_NA    0x01 /* not available */
#define PERF_MEM_SNOOP_NONE  0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT   0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS  0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM  0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT 19

/* locked instruction */
#define PERF_MEM_LOCK_NA     0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT  24

/* TLB access */
#define PERF_MEM_TLB_NA    0x01 /* not available */
#define PERF_MEM_TLB_HIT   0x02 /* hit level */
#define PERF_MEM_TLB_MISS  0x04 /* miss level */
#define PERF_MEM_TLB_L1    0x08 /* L1 */
#define PERF_MEM_TLB_L2    0x10 /* L2 */
#define PERF_MEM_TLB_WK    0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS    0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT 26

#define PERF_MEM_S(a, s) \
        (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
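
/*
 * PERF_MEM_S() composes perf_mem_data_src.val entries from the
 * field/name pairs above; e.g. a load that hit in L1 with no snooping
 * required:
 *
 *      union perf_mem_data_src src;
 *
 *      src.val = PERF_MEM_S(OP, LOAD)    |
 *                PERF_MEM_S(LVL, HIT)    |
 *                PERF_MEM_S(LVL, L1)     |
 *                PERF_MEM_S(SNOOP, NONE) |
 *                PERF_MEM_S(LOCK, NA)    |
 *                PERF_MEM_S(TLB, NA);
 */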
/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional. If it is not
 * supported, mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 */
struct perf_branch_entry {
        __u64 from;
        __u64 to;
        __u64 mispred:1,  /* target mispredicted */
              predicted:1,/* target predicted */
              in_tx:1,    /* in transaction */
              abort:1,    /* transaction abort */
              reserved:60;
};
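
/*
 * A sketch of walking a PERF_SAMPLE_BRANCH_STACK payload ({ u64 nr;
 * { u64 from, to, flags } lbr[nr]; } in PERF_RECORD_SAMPLE above),
 * where each entry carries the struct perf_branch_entry layout:
 *
 *      struct perf_branch_entry *br = entries;
 *
 *      for (i = 0; i < nr; i++, br++) {
 *              if (br->mispred)
 *                      ... branch from br->from to br->to was
 *                          mispredicted ...
 *      }
 */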
#endif /* _UAPI_LINUX_PERF_EVENT_H */