/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function handler
 * for events. That is, if all events have the same parameters and just
 * have distinct trace points. Each tracepoint can be defined with
 * DEFINE_EVENT and that will map the DECLARE_EVENT_CLASS to the
 * tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name,			       \
			    PARAMS(proto),		       \
			    PARAMS(args),		       \
			    PARAMS(tstruct),		       \
			    PARAMS(assign),		       \
			    PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)					\
	__TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)				\
	__TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
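
/*
 * For illustration only (not part of the original header): a minimal
 * sketch of what stage 1 produces. The event name "foo_read" and its
 * fields are invented for this example. A tracepoint defined as
 *
 * TRACE_EVENT(foo_read,
 *	TP_PROTO(int fd, size_t len),
 *	TP_ARGS(fd, len),
 *	TP_STRUCT__entry(
 *		__field(int, fd)
 *		__field(size_t, len)
 *		__string(name, current->comm)
 *	),
 *	TP_fast_assign(
 *		__entry->fd = fd;
 *		__entry->len = len;
 *		__assign_str(name, current->comm);
 *	),
 *	TP_printk("fd=%d len=%zu name=%s",
 *		  __entry->fd, __entry->len, __get_str(name))
 * );
 *
 * expands, at this stage, to roughly:
 *
 * struct ftrace_raw_foo_read {
 *	struct trace_entry	ent;
 *	int			fd;
 *	size_t			len;
 *	u32			__data_loc_name;
 *	char			__data[0];
 * };
 *
 * static struct ftrace_event_class event_class_foo_read;
 *
 * Note how __string() collapses into a u32 __data_loc_<item> slot; the
 * string bytes themselves live past __data[] at commit time.
 */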
/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this holds
 * the offset of the array from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
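
/*
 * For illustration only: with the hypothetical foo_read event from the
 * stage 1 example above, this pass generates just:
 *
 * struct ftrace_data_offsets_foo_read {
 *	u32 name;
 * };
 *
 * where the low 16 bits of "name" will hold the offset of the string
 * from the start of the entry and the upper 16 bits its length (see
 * ftrace_get_offsets_<call> below).
 */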
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field;	<-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __get_bitmask
#define __get_bitmask(field)						\
	({								\
		void *__bitmask = __get_dynamic_array(field);		\
		unsigned int __bitmask_size;				\
		__bitmask_size = __get_dynamic_array_len(field);	\
		ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
	})

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		ftrace_print_symbols_seq_u64(p, value, symbols);	\
	})
#else
#define __print_symbolic_u64(value, symbol_array...)			\
			__print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
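
/*
 * For illustration only: how these helpers typically appear inside a
 * TP_printk() format (the field names and value tables here are
 * invented):
 *
 * TP_printk("state=%s flags=%s",
 *	     __print_symbolic(__entry->state,
 *			      { 0, "IDLE" }, { 1, "BUSY" }),
 *	     __print_flags(__entry->flags, "|",
 *			   { 0x1, "RD" }, { 0x2, "WR" }))
 *
 * At output time each helper renders into the per-iterator tmp_seq (p)
 * and yields a pointer to the formatted text for the "%s".
 */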
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct ftrace_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = ftrace_raw_output_prep(iter, trace_event);		\
	if (ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, print);				\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	return ftrace_output_call(iter, #call, print);			\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
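
/*
 * For illustration only: for the hypothetical foo_read event, the
 * class above generates ftrace_raw_output_foo_read(), whose printf
 * step expands (via the TP_printk override) to roughly:
 *
 *	ret = trace_seq_printf(s, "fd=%d len=%zu name=%s" "\n",
 *			       field->fd, field->len,
 *			       (char *)((void *)field +
 *					(field->__data_loc_name & 0xffff)));
 *
 * i.e. __entry becomes "field" and __get_str() resolves through the
 * __data_loc offset recorded at commit time.
 */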
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	do {								\
		char *type_str = #type"["__stringify(len)"]";		\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		ret = trace_define_field(event_call, type_str, #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace __init						\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
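
/*
 * For illustration only: for the hypothetical foo_read event this pass
 * generates a ftrace_define_fields_foo_read() that registers each field
 * with the filter core, e.g. for the "fd" field:
 *
 *	ret = trace_define_field(event_call, "int", "fd",
 *				 offsetof(typeof(field), fd),
 *				 sizeof(field.fd),
 *				 is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 *
 * followed by equivalent calls for "len" and the "__data_loc char[]"
 * entry backing the string.
 */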
/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;

#undef __string
#define __string(item, src) __dynamic_array(char, item,		\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * nr_bits bits (in practice, enough for num_possible_cpus()).
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * nr_bits bits padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)				\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))
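
/*
 * Worked example (illustrative): nr_bits = 36 on a 64-bit kernel.
 *
 *	__bitmask_size_in_bytes_raw(36) = (36 + 7) / 8	= 5 bytes
 *	__bitmask_size_in_longs(36)	= (5 + 7) / 8	= 1 long
 *	__bitmask_size_in_bytes(36)	= 1 * 8		= 8 bytes
 *
 * so __bitmask() reserves one full long in the dynamic-array area, and
 * __item_length comes out as 1 * sizeof(unsigned long) = 8 bytes.
 */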
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	int __maybe_unused __item_length;				\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
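
/*
 * For illustration only: ftrace_get_offsets_foo_read() for the
 * hypothetical event above reduces to:
 *
 *	__item_length = (strlen(current->comm ? (const char *)current->comm
 *					      : "(null)") + 1) * sizeof(char);
 *	__data_offsets->name  = __data_size +
 *				offsetof(typeof(*entry), __data);
 *	__data_offsets->name |= __item_length << 16;
 *	__data_size += __item_length;
 *
 *	return __data_size;
 *
 * The plain __field() entries contribute nothing here; only the
 * dynamic arrays need their offsets and sizes computed per event.
 */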
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_file *ftrace_file = __data;
 *	struct ftrace_event_call *event_call = ftrace_file->event_call;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = ftrace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
 *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
 *			event_triggers_call(ftrace_file, NULL);
 *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(ftrace_file, entry);
 *
 *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 *		     &ftrace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(ftrace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *	.class			= event_class_<template>,
 *	{
 *		.tp			= &__tracepoint_<call>,
 *	},
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */
#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)				\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_addr
#define __perf_addr(a)	(a)

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)
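
/*
 * For illustration only: inside ftrace_raw_event_foo_read() (below),
 * the tstruct pass stores the precomputed offset and the assign pass
 * copies the string, roughly:
 *
 *	entry->__data_loc_name = __data_offsets.name;
 *	...
 *	entry->fd = fd;
 *	entry->len = len;
 *	strcpy(__get_str(name),
 *	       (current->comm) ? (const char *)(current->comm) : "(null)");
 *
 * __get_str() lands inside the reserved ring-buffer record because the
 * low 16 bits of __data_loc_name point past the fixed-size fields.
 */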
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_file *ftrace_file = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_buffer fbuffer;				\
	struct ftrace_raw_##call *entry;				\
	int __data_size;						\
									\
	if (ftrace_trigger_soft_disabled(ftrace_file))			\
		return;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,	\
				 sizeof(*entry) + __data_size);		\
									\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	ftrace_event_buffer_commit(&fbuffer);				\
}

/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
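
/*
 * For illustration only: with the helpers above undefined, the
 * stringified format for the hypothetical foo_read event becomes
 *
 *	static const char print_fmt_foo_read[] =
 *		"\"fd=%d len=%zu name=%s\", REC->fd, REC->len, __get_str(name)";
 *
 * i.e. __entry turns into the literal token REC and __get_str() is kept
 * verbatim, which is what shows up in the event's "format" file for
 * user space to parse.
 */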
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used __refdata event_class_##call = { \
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used event_##call = {		\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used event_##call = {		\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a)	(__addr = (a))

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (__builtin_constant_p(!__task) && !__task &&			\
				hlist_empty(head))			\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	perf_fetch_caller_regs(&__regs);				\
	entry = perf_trace_buf_prepare(__entry_size,			\
			event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head, __task);			\
}
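
/*
 * Worked example (illustrative) for the size computation above, with
 * sizeof(*entry) = 16 and __data_size = 5:
 *
 *	ALIGN(5 + 16 + 4, 8) = ALIGN(25, 8) = 32
 *	__entry_size = 32 - 4 = 28
 *
 * The extra u32 reserved and then subtracted leaves room for the u32
 * size word perf stores in front of raw sample data, keeping the whole
 * record u64-aligned.
 */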
/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */