/* perf tools: event record definitions and synthesis/processing API (event.h) */
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>

#include "../perf.h"
#include "map.h"
#include "build-id.h"
#include "perf_regs.h"
  9. struct mmap_event {
  10. struct perf_event_header header;
  11. u32 pid, tid;
  12. u64 start;
  13. u64 len;
  14. u64 pgoff;
  15. char filename[PATH_MAX];
  16. };
  17. struct mmap2_event {
  18. struct perf_event_header header;
  19. u32 pid, tid;
  20. u64 start;
  21. u64 len;
  22. u64 pgoff;
  23. u32 maj;
  24. u32 min;
  25. u64 ino;
  26. u64 ino_generation;
  27. u32 prot;
  28. u32 flags;
  29. char filename[PATH_MAX];
  30. };
  31. struct comm_event {
  32. struct perf_event_header header;
  33. u32 pid, tid;
  34. char comm[16];
  35. };
  36. struct fork_event {
  37. struct perf_event_header header;
  38. u32 pid, ppid;
  39. u32 tid, ptid;
  40. u64 time;
  41. };
  42. struct lost_event {
  43. struct perf_event_header header;
  44. u64 id;
  45. u64 lost;
  46. };
  47. /*
  48. * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
  49. */
  50. struct read_event {
  51. struct perf_event_header header;
  52. u32 pid, tid;
  53. u64 value;
  54. u64 time_enabled;
  55. u64 time_running;
  56. u64 id;
  57. };
  58. struct throttle_event {
  59. struct perf_event_header header;
  60. u64 time;
  61. u64 id;
  62. u64 stream_id;
  63. };
  64. #define PERF_SAMPLE_MASK \
  65. (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
  66. PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
  67. PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
  68. PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
  69. PERF_SAMPLE_IDENTIFIER)
  70. /* perf sample has 16 bits size limit */
  71. #define PERF_SAMPLE_MAX_SIZE (1 << 16)
  72. struct sample_event {
  73. struct perf_event_header header;
  74. u64 array[];
  75. };
  76. struct regs_dump {
  77. u64 abi;
  78. u64 mask;
  79. u64 *regs;
  80. /* Cached values/mask filled by first register access. */
  81. u64 cache_regs[PERF_REGS_MAX];
  82. u64 cache_mask;
  83. };
  84. struct stack_dump {
  85. u16 offset;
  86. u64 size;
  87. char *data;
  88. };
  89. struct sample_read_value {
  90. u64 value;
  91. u64 id;
  92. };
  93. struct sample_read {
  94. u64 time_enabled;
  95. u64 time_running;
  96. union {
  97. struct {
  98. u64 nr;
  99. struct sample_read_value *values;
  100. } group;
  101. struct sample_read_value one;
  102. };
  103. };
  104. struct ip_callchain {
  105. u64 nr;
  106. u64 ips[0];
  107. };
  108. struct branch_flags {
  109. u64 mispred:1;
  110. u64 predicted:1;
  111. u64 in_tx:1;
  112. u64 abort:1;
  113. u64 reserved:60;
  114. };
  115. struct branch_entry {
  116. u64 from;
  117. u64 to;
  118. struct branch_flags flags;
  119. };
  120. struct branch_stack {
  121. u64 nr;
  122. struct branch_entry entries[0];
  123. };
  124. enum {
  125. PERF_IP_FLAG_BRANCH = 1ULL << 0,
  126. PERF_IP_FLAG_CALL = 1ULL << 1,
  127. PERF_IP_FLAG_RETURN = 1ULL << 2,
  128. PERF_IP_FLAG_CONDITIONAL = 1ULL << 3,
  129. PERF_IP_FLAG_SYSCALLRET = 1ULL << 4,
  130. PERF_IP_FLAG_ASYNC = 1ULL << 5,
  131. PERF_IP_FLAG_INTERRUPT = 1ULL << 6,
  132. PERF_IP_FLAG_TX_ABORT = 1ULL << 7,
  133. PERF_IP_FLAG_TRACE_BEGIN = 1ULL << 8,
  134. PERF_IP_FLAG_TRACE_END = 1ULL << 9,
  135. PERF_IP_FLAG_IN_TX = 1ULL << 10,
  136. };
  137. #define PERF_BRANCH_MASK (\
  138. PERF_IP_FLAG_BRANCH |\
  139. PERF_IP_FLAG_CALL |\
  140. PERF_IP_FLAG_RETURN |\
  141. PERF_IP_FLAG_CONDITIONAL |\
  142. PERF_IP_FLAG_SYSCALLRET |\
  143. PERF_IP_FLAG_ASYNC |\
  144. PERF_IP_FLAG_INTERRUPT |\
  145. PERF_IP_FLAG_TX_ABORT |\
  146. PERF_IP_FLAG_TRACE_BEGIN |\
  147. PERF_IP_FLAG_TRACE_END)
  148. struct perf_sample {
  149. u64 ip;
  150. u32 pid, tid;
  151. u64 time;
  152. u64 addr;
  153. u64 id;
  154. u64 stream_id;
  155. u64 period;
  156. u64 weight;
  157. u64 transaction;
  158. u32 cpu;
  159. u32 raw_size;
  160. u64 data_src;
  161. u32 flags;
  162. u16 insn_len;
  163. void *raw_data;
  164. struct ip_callchain *callchain;
  165. struct branch_stack *branch_stack;
  166. struct regs_dump user_regs;
  167. struct regs_dump intr_regs;
  168. struct stack_dump user_stack;
  169. struct sample_read read;
  170. };
  171. #define PERF_MEM_DATA_SRC_NONE \
  172. (PERF_MEM_S(OP, NA) |\
  173. PERF_MEM_S(LVL, NA) |\
  174. PERF_MEM_S(SNOOP, NA) |\
  175. PERF_MEM_S(LOCK, NA) |\
  176. PERF_MEM_S(TLB, NA))
  177. struct build_id_event {
  178. struct perf_event_header header;
  179. pid_t pid;
  180. u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
  181. char filename[];
  182. };
  183. enum perf_user_event_type { /* above any possible kernel type */
  184. PERF_RECORD_USER_TYPE_START = 64,
  185. PERF_RECORD_HEADER_ATTR = 64,
  186. PERF_RECORD_HEADER_EVENT_TYPE = 65, /* depreceated */
  187. PERF_RECORD_HEADER_TRACING_DATA = 66,
  188. PERF_RECORD_HEADER_BUILD_ID = 67,
  189. PERF_RECORD_FINISHED_ROUND = 68,
  190. PERF_RECORD_ID_INDEX = 69,
  191. PERF_RECORD_HEADER_MAX
  192. };
  193. /*
  194. * The kernel collects the number of events it couldn't send in a stretch and
  195. * when possible sends this number in a PERF_RECORD_LOST event. The number of
  196. * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while
  197. * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
  198. * the sum of all struct lost_event.lost fields reported.
  199. *
  200. * The total_period is needed because by default auto-freq is used, so
  201. * multipling nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
  202. * the total number of low level events, it is necessary to to sum all struct
  203. * sample_event.period and stash the result in total_period.
  204. */
  205. struct events_stats {
  206. u64 total_period;
  207. u64 total_non_filtered_period;
  208. u64 total_lost;
  209. u64 total_invalid_chains;
  210. u32 nr_events[PERF_RECORD_HEADER_MAX];
  211. u32 nr_non_filtered_samples;
  212. u32 nr_lost_warned;
  213. u32 nr_unknown_events;
  214. u32 nr_invalid_chains;
  215. u32 nr_unknown_id;
  216. u32 nr_unprocessable_samples;
  217. u32 nr_unordered_events;
  218. };
  219. struct attr_event {
  220. struct perf_event_header header;
  221. struct perf_event_attr attr;
  222. u64 id[];
  223. };
  224. #define MAX_EVENT_NAME 64
  225. struct perf_trace_event_type {
  226. u64 event_id;
  227. char name[MAX_EVENT_NAME];
  228. };
  229. struct event_type_event {
  230. struct perf_event_header header;
  231. struct perf_trace_event_type event_type;
  232. };
  233. struct tracing_data_event {
  234. struct perf_event_header header;
  235. u32 size;
  236. };
  237. struct id_index_entry {
  238. u64 id;
  239. u64 idx;
  240. u64 cpu;
  241. u64 tid;
  242. };
  243. struct id_index_event {
  244. struct perf_event_header header;
  245. u64 nr;
  246. struct id_index_entry entries[0];
  247. };
  248. union perf_event {
  249. struct perf_event_header header;
  250. struct mmap_event mmap;
  251. struct mmap2_event mmap2;
  252. struct comm_event comm;
  253. struct fork_event fork;
  254. struct lost_event lost;
  255. struct read_event read;
  256. struct throttle_event throttle;
  257. struct sample_event sample;
  258. struct attr_event attr;
  259. struct event_type_event event_type;
  260. struct tracing_data_event tracing_data;
  261. struct build_id_event build_id;
  262. struct id_index_event id_index;
  263. };
  264. void perf_event__print_totals(void);
  265. struct perf_tool;
  266. struct thread_map;
  267. typedef int (*perf_event__handler_t)(struct perf_tool *tool,
  268. union perf_event *event,
  269. struct perf_sample *sample,
  270. struct machine *machine);
  271. int perf_event__synthesize_thread_map(struct perf_tool *tool,
  272. struct thread_map *threads,
  273. perf_event__handler_t process,
  274. struct machine *machine, bool mmap_data);
  275. int perf_event__synthesize_threads(struct perf_tool *tool,
  276. perf_event__handler_t process,
  277. struct machine *machine, bool mmap_data);
  278. int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
  279. perf_event__handler_t process,
  280. struct machine *machine);
  281. int perf_event__synthesize_modules(struct perf_tool *tool,
  282. perf_event__handler_t process,
  283. struct machine *machine);
  284. int perf_event__process_comm(struct perf_tool *tool,
  285. union perf_event *event,
  286. struct perf_sample *sample,
  287. struct machine *machine);
  288. int perf_event__process_lost(struct perf_tool *tool,
  289. union perf_event *event,
  290. struct perf_sample *sample,
  291. struct machine *machine);
  292. int perf_event__process_mmap(struct perf_tool *tool,
  293. union perf_event *event,
  294. struct perf_sample *sample,
  295. struct machine *machine);
  296. int perf_event__process_mmap2(struct perf_tool *tool,
  297. union perf_event *event,
  298. struct perf_sample *sample,
  299. struct machine *machine);
  300. int perf_event__process_fork(struct perf_tool *tool,
  301. union perf_event *event,
  302. struct perf_sample *sample,
  303. struct machine *machine);
  304. int perf_event__process_exit(struct perf_tool *tool,
  305. union perf_event *event,
  306. struct perf_sample *sample,
  307. struct machine *machine);
  308. int perf_event__process(struct perf_tool *tool,
  309. union perf_event *event,
  310. struct perf_sample *sample,
  311. struct machine *machine);
  312. struct addr_location;
  313. int perf_event__preprocess_sample(const union perf_event *event,
  314. struct machine *machine,
  315. struct addr_location *al,
  316. struct perf_sample *sample);
  317. struct thread;
  318. bool is_bts_event(struct perf_event_attr *attr);
  319. bool sample_addr_correlates_sym(struct perf_event_attr *attr);
  320. void perf_event__preprocess_sample_addr(union perf_event *event,
  321. struct perf_sample *sample,
  322. struct thread *thread,
  323. struct addr_location *al);
  324. const char *perf_event__name(unsigned int id);
  325. size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
  326. u64 read_format);
  327. int perf_event__synthesize_sample(union perf_event *event, u64 type,
  328. u64 read_format,
  329. const struct perf_sample *sample,
  330. bool swapped);
  331. int perf_event__synthesize_mmap_events(struct perf_tool *tool,
  332. union perf_event *event,
  333. pid_t pid, pid_t tgid,
  334. perf_event__handler_t process,
  335. struct machine *machine,
  336. bool mmap_data);
  337. size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
  338. size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
  339. size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
  340. size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
  341. size_t perf_event__fprintf(union perf_event *event, FILE *fp);
  342. u64 kallsyms__get_function_start(const char *kallsyms_filename,
  343. const char *symbol_name);
#endif /* __PERF_RECORD_H */