event.h

#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>

#include "../perf.h"
#include "map.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        char filename[PATH_MAX];
};

struct mmap2_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        u32 maj;
        u32 min;
        u64 ino;
        u64 ino_generation;
        u32 prot;
        u32 flags;
        char filename[PATH_MAX];
};

struct comm_event {
        struct perf_event_header header;
        u32 pid, tid;
        char comm[16];
};

struct fork_event {
        struct perf_event_header header;
        u32 pid, ppid;
        u32 tid, ptid;
        u64 time;
};

struct lost_event {
        struct perf_event_header header;
        u64 id;
        u64 lost;
};

struct lost_samples_event {
        struct perf_event_header header;
        u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 value;
        u64 time_enabled;
        u64 time_running;
        u64 id;
};

struct throttle_event {
        struct perf_event_header header;
        u64 time;
        u64 id;
        u64 stream_id;
};

#define PERF_SAMPLE_MASK \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
         PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
         PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
         PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
         PERF_SAMPLE_IDENTIFIER)

/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)

struct sample_event {
        struct perf_event_header header;
        u64 array[];
};

struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;

        /* Cached values/mask filled by first register access. */
        u64 cache_regs[PERF_REGS_MAX];
        u64 cache_mask;
};

struct stack_dump {
        u16 offset;
        u64 size;
        char *data;
};

struct sample_read_value {
        u64 value;
        u64 id;
};

struct sample_read {
        u64 time_enabled;
        u64 time_running;
        union {
                struct {
                        u64 nr;
                        struct sample_read_value *values;
                } group;
                struct sample_read_value one;
        };
};
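
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing how a consumer picks between the 'group' and 'one'
 * layouts of struct sample_read, assuming PERF_FORMAT_GROUP is visible
 * from <linux/perf_event.h> (pulled in via "../perf.h").
 */
static inline u64 sample_read__first_value(const struct sample_read *sr,
        u64 read_format)
{
        if (read_format & PERF_FORMAT_GROUP)
                return sr->group.nr ? sr->group.values[0].value : 0;
        return sr->one.value;
}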

struct ip_callchain {
        u64 nr;
        u64 ips[0];
};

struct branch_flags {
        u64 mispred:1;
        u64 predicted:1;
        u64 in_tx:1;
        u64 abort:1;
        u64 cycles:16;
        u64 reserved:44;
};

struct branch_entry {
        u64 from;
        u64 to;
        struct branch_flags flags;
};

struct branch_stack {
        u64 nr;
        struct branch_entry entries[0];
};
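
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper walking the flexible entries[] array, which holds 'nr'
 * struct branch_entry records laid out right after the count.
 */
static inline u64 branch_stack__nr_mispredicted(const struct branch_stack *bs)
{
        u64 i, n = 0;

        for (i = 0; i < bs->nr; i++)
                n += bs->entries[i].flags.mispred;
        return n;
}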

enum {
        PERF_IP_FLAG_BRANCH = 1ULL << 0,
        PERF_IP_FLAG_CALL = 1ULL << 1,
        PERF_IP_FLAG_RETURN = 1ULL << 2,
        PERF_IP_FLAG_CONDITIONAL = 1ULL << 3,
        PERF_IP_FLAG_SYSCALLRET = 1ULL << 4,
        PERF_IP_FLAG_ASYNC = 1ULL << 5,
        PERF_IP_FLAG_INTERRUPT = 1ULL << 6,
        PERF_IP_FLAG_TX_ABORT = 1ULL << 7,
        PERF_IP_FLAG_TRACE_BEGIN = 1ULL << 8,
        PERF_IP_FLAG_TRACE_END = 1ULL << 9,
        PERF_IP_FLAG_IN_TX = 1ULL << 10,
};

#define PERF_IP_FLAG_CHARS "bcrosyiABEx"

#define PERF_BRANCH_MASK (\
        PERF_IP_FLAG_BRANCH |\
        PERF_IP_FLAG_CALL |\
        PERF_IP_FLAG_RETURN |\
        PERF_IP_FLAG_CONDITIONAL |\
        PERF_IP_FLAG_SYSCALLRET |\
        PERF_IP_FLAG_ASYNC |\
        PERF_IP_FLAG_INTERRUPT |\
        PERF_IP_FLAG_TX_ABORT |\
        PERF_IP_FLAG_TRACE_BEGIN |\
        PERF_IP_FLAG_TRACE_END)

struct perf_sample {
        u64 ip;
        u32 pid, tid;
        u64 time;
        u64 addr;
        u64 id;
        u64 stream_id;
        u64 period;
        u64 weight;
        u64 transaction;
        u32 cpu;
        u32 raw_size;
        u64 data_src;
        u32 flags;
        u16 insn_len;
        u8 cpumode;
        void *raw_data;
        struct ip_callchain *callchain;
        struct branch_stack *branch_stack;
        struct regs_dump user_regs;
        struct regs_dump intr_regs;
        struct stack_dump user_stack;
        struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
        (PERF_MEM_S(OP, NA) |\
         PERF_MEM_S(LVL, NA) |\
         PERF_MEM_S(SNOOP, NA) |\
         PERF_MEM_S(LOCK, NA) |\
         PERF_MEM_S(TLB, NA))

struct build_id_event {
        struct perf_event_header header;
        pid_t pid;
        u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
        char filename[];
};

enum perf_user_event_type { /* above any possible kernel type */
        PERF_RECORD_USER_TYPE_START = 64,
        PERF_RECORD_HEADER_ATTR = 64,
        PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */
        PERF_RECORD_HEADER_TRACING_DATA = 66,
        PERF_RECORD_HEADER_BUILD_ID = 67,
        PERF_RECORD_FINISHED_ROUND = 68,
        PERF_RECORD_ID_INDEX = 69,
        PERF_RECORD_AUXTRACE_INFO = 70,
        PERF_RECORD_AUXTRACE = 71,
        PERF_RECORD_AUXTRACE_ERROR = 72,
        PERF_RECORD_THREAD_MAP = 73,
        PERF_RECORD_CPU_MAP = 74,
        PERF_RECORD_STAT_CONFIG = 75,
        PERF_RECORD_STAT = 76,
        PERF_RECORD_STAT_ROUND = 77,
        PERF_RECORD_EVENT_UPDATE = 78,
        PERF_RECORD_HEADER_MAX
};

enum auxtrace_error_type {
        PERF_AUXTRACE_ERROR_ITRACE = 1,
        PERF_AUXTRACE_ERROR_MAX
};

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The kernel discards mixed up samples and sends the number in a
 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
 * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
 * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
 * all struct lost_samples_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_RECORD_SAMPLE] by a frequency doesn't give the
 * total number of low level events; it is necessary to sum all struct
 * sample_event.period values and stash the result in total_period.
 */
struct events_stats {
        u64 total_period;
        u64 total_non_filtered_period;
        u64 total_lost;
        u64 total_lost_samples;
        u64 total_aux_lost;
        u64 total_invalid_chains;
        u32 nr_events[PERF_RECORD_HEADER_MAX];
        u32 nr_non_filtered_samples;
        u32 nr_lost_warned;
        u32 nr_unknown_events;
        u32 nr_invalid_chains;
        u32 nr_unknown_id;
        u32 nr_unprocessable_samples;
        u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
        u32 nr_proc_map_timeout;
};
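
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing how the accounting described in the comment above maps
 * onto the counters, called once per PERF_RECORD_LOST event
 * (PERF_RECORD_LOST assumed to come from <linux/perf_event.h>).
 */
static inline void events_stats__account_lost(struct events_stats *stats,
        const struct lost_event *lost)
{
        stats->nr_events[PERF_RECORD_LOST]++;   /* one more "chunk" of lost events */
        stats->total_lost += lost->lost;        /* events actually lost in that chunk */
}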

enum {
        PERF_CPU_MAP__CPUS = 0,
        PERF_CPU_MAP__MASK = 1,
};

struct cpu_map_entries {
        u16 nr;
        u16 cpu[];
};

struct cpu_map_mask {
        u16 nr;
        u16 long_size;
        unsigned long mask[];
};

struct cpu_map_data {
        u16 type;
        char data[];
};

struct cpu_map_event {
        struct perf_event_header header;
        struct cpu_map_data data;
};
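
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing that data[] carries either a struct cpu_map_entries or
 * a struct cpu_map_mask payload, selected by the 'type' field.
 */
static inline u16 cpu_map_data__nr(const struct cpu_map_data *data)
{
        if (data->type == PERF_CPU_MAP__CPUS)
                return ((const struct cpu_map_entries *)data->data)->nr;
        return ((const struct cpu_map_mask *)data->data)->nr;
}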

struct attr_event {
        struct perf_event_header header;
        struct perf_event_attr attr;
        u64 id[];
};

enum {
        PERF_EVENT_UPDATE__UNIT = 0,
        PERF_EVENT_UPDATE__SCALE = 1,
        PERF_EVENT_UPDATE__NAME = 2,
        PERF_EVENT_UPDATE__CPUS = 3,
};

struct event_update_event_cpus {
        struct cpu_map_data cpus;
};

struct event_update_event_scale {
        double scale;
};

struct event_update_event {
        struct perf_event_header header;
        u64 type;
        u64 id;
        char data[];
};
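
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing that data[] is interpreted according to 'type', e.g. a
 * struct event_update_event_scale payload for PERF_EVENT_UPDATE__SCALE
 * and a string for the unit/name updates.
 */
static inline double event_update_event__scale(const struct event_update_event *ev)
{
        const struct event_update_event_scale *s = (const void *)ev->data;

        return ev->type == PERF_EVENT_UPDATE__SCALE ? s->scale : 1.0;
}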

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
        u64 event_id;
        char name[MAX_EVENT_NAME];
};

struct event_type_event {
        struct perf_event_header header;
        struct perf_trace_event_type event_type;
};

struct tracing_data_event {
        struct perf_event_header header;
        u32 size;
};

struct id_index_entry {
        u64 id;
        u64 idx;
        u64 cpu;
        u64 tid;
};

struct id_index_event {
        struct perf_event_header header;
        u64 nr;
        struct id_index_entry entries[0];
};

struct auxtrace_info_event {
        struct perf_event_header header;
        u32 type;
        u32 reserved__; /* For alignment */
        u64 priv[];
};

struct auxtrace_event {
        struct perf_event_header header;
        u64 size;
        u64 offset;
        u64 reference;
        u32 idx;
        u32 tid;
        u32 cpu;
        u32 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

struct auxtrace_error_event {
        struct perf_event_header header;
        u32 type;
        u32 code;
        u32 cpu;
        u32 pid;
        u32 tid;
        u32 reserved__; /* For alignment */
        u64 ip;
        char msg[MAX_AUXTRACE_ERROR_MSG];
};

struct aux_event {
        struct perf_event_header header;
        u64 aux_offset;
        u64 aux_size;
        u64 flags;
};

struct itrace_start_event {
        struct perf_event_header header;
        u32 pid, tid;
};

struct context_switch_event {
        struct perf_event_header header;
        u32 next_prev_pid;
        u32 next_prev_tid;
};
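
/*
 * Illustrative sketch, not part of the original header: for CPU-wide
 * switch events, next_prev_pid/tid name the incoming task on a
 * switch-in and the outgoing task on a switch-out; the direction is
 * flagged with PERF_RECORD_MISC_SWITCH_OUT in header.misc (assumed to
 * come from <linux/perf_event.h>).
 */
static inline bool context_switch_event__is_out(const struct context_switch_event *ev)
{
        return ev->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
}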

struct thread_map_event_entry {
        u64 pid;
        char comm[16];
};

struct thread_map_event {
        struct perf_event_header header;
        u64 nr;
        struct thread_map_event_entry entries[];
};

enum {
        PERF_STAT_CONFIG_TERM__AGGR_MODE = 0,
        PERF_STAT_CONFIG_TERM__INTERVAL = 1,
        PERF_STAT_CONFIG_TERM__SCALE = 2,
        PERF_STAT_CONFIG_TERM__MAX = 3,
};

struct stat_config_event_entry {
        u64 tag;
        u64 val;
};

struct stat_config_event {
        struct perf_event_header header;
        u64 nr;
        struct stat_config_event_entry data[];
};

struct stat_event {
        struct perf_event_header header;
        u64 id;
        u32 cpu;
        u32 thread;
        union {
                struct {
                        u64 val;
                        u64 ena;
                        u64 run;
                };
                u64 values[3];
        };
};
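
/*
 * Illustrative sketch, not part of the original header: the anonymous
 * union makes val/ena/run and values[0..2] two views of the same
 * counters, so a reader can use whichever form is more convenient.
 */
static inline bool stat_event__fully_counted(const struct stat_event *st)
{
        return st->values[1] == st->values[2];  /* same as st->ena == st->run */
}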

enum {
        PERF_STAT_ROUND_TYPE__INTERVAL = 0,
        PERF_STAT_ROUND_TYPE__FINAL = 1,
};

struct stat_round_event {
        struct perf_event_header header;
        u64 type;
        u64 time;
};

union perf_event {
        struct perf_event_header header;
        struct mmap_event mmap;
        struct mmap2_event mmap2;
        struct comm_event comm;
        struct fork_event fork;
        struct lost_event lost;
        struct lost_samples_event lost_samples;
        struct read_event read;
        struct throttle_event throttle;
        struct sample_event sample;
        struct attr_event attr;
        struct event_update_event event_update;
        struct event_type_event event_type;
        struct tracing_data_event tracing_data;
        struct build_id_event build_id;
        struct id_index_event id_index;
        struct auxtrace_info_event auxtrace_info;
        struct auxtrace_event auxtrace;
        struct auxtrace_error_event auxtrace_error;
        struct aux_event aux;
        struct itrace_start_event itrace_start;
        struct context_switch_event context_switch;
        struct thread_map_event thread_map;
        struct cpu_map_event cpu_map;
        struct stat_config_event stat_config;
        struct stat_event stat;
        struct stat_round_event stat_round;
};
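
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing the usual pattern of dispatching on header.type before
 * touching a specific union member (PERF_RECORD_MMAP/PERF_RECORD_MMAP2
 * assumed to come from <linux/perf_event.h>).
 */
static inline const char *perf_event__mmap_filename(const union perf_event *event)
{
        switch (event->header.type) {
        case PERF_RECORD_MMAP:
                return event->mmap.filename;
        case PERF_RECORD_MMAP2:
                return event->mmap2.filename;
        default:
                return NULL;
        }
}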

void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;
struct cpu_map;
struct perf_stat_config;
struct perf_counts_values;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);

int perf_event__synthesize_thread_map(struct perf_tool *tool,
        struct thread_map *threads,
        perf_event__handler_t process,
        struct machine *machine, bool mmap_data,
        unsigned int proc_map_timeout);
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
        struct thread_map *threads,
        perf_event__handler_t process,
        struct machine *machine);
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
        struct cpu_map *cpus,
        perf_event__handler_t process,
        struct machine *machine);
int perf_event__synthesize_threads(struct perf_tool *tool,
        perf_event__handler_t process,
        struct machine *machine, bool mmap_data,
        unsigned int proc_map_timeout);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
        perf_event__handler_t process,
        struct machine *machine);
int perf_event__synthesize_stat_config(struct perf_tool *tool,
        struct perf_stat_config *config,
        perf_event__handler_t process,
        struct machine *machine);
void perf_event__read_stat_config(struct perf_stat_config *config,
        struct stat_config_event *event);
int perf_event__synthesize_stat(struct perf_tool *tool,
        u32 cpu, u32 thread, u64 id,
        struct perf_counts_values *count,
        perf_event__handler_t process,
        struct machine *machine);
int perf_event__synthesize_stat_round(struct perf_tool *tool,
        u64 time, u64 type,
        perf_event__handler_t process,
        struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
        perf_event__handler_t process,
        struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process_lost_samples(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process_aux(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process_itrace_start(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process_switch(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);
int perf_event__process(struct perf_tool *tool,
        union perf_event *event,
        struct perf_sample *sample,
        struct machine *machine);

struct addr_location;

int machine__resolve(struct machine *machine, struct addr_location *al,
        struct perf_sample *sample);

void addr_location__put(struct addr_location *al);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void thread__resolve(struct thread *thread, struct addr_location *al,
        struct perf_sample *sample);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
        u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
        u64 read_format,
        const struct perf_sample *sample,
        bool swapped);

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
        union perf_event *event, pid_t pid,
        perf_event__handler_t process,
        struct machine *machine);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
        union perf_event *event,
        pid_t pid, pid_t tgid,
        perf_event__handler_t process,
        struct machine *machine,
        bool mmap_data,
        unsigned int proc_map_timeout);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

u64 kallsyms__get_function_start(const char *kallsyms_filename,
        const char *symbol_name);

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
        u16 type, int max);

#endif /* __PERF_RECORD_H */