/* include/trace/trace_events.h */
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct trace_event_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/trace_events.h>

/* TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a valid C id. */
#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif

/*
 * Two-level expansion so TRACE_SYSTEM_VAR is expanded before pasting,
 * yielding a unique variable name: str__<system>__trace_system_name.
 */
#define __app__(x, y) str__##x##y
#define __app(x, y) __app__(x, y)

#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)

/* Emit one static string holding the stringified TRACE_SYSTEM name. */
#define TRACE_MAKE_SYSTEM_STR()				\
	static const char TRACE_SYSTEM_STRING[] =	\
		__stringify(TRACE_SYSTEM)

TRACE_MAKE_SYSTEM_STR();
/*
 * TRACE_DEFINE_ENUM(a) records the numeric value of symbol <a> in a
 * struct trace_eval_map.  The map itself is __initdata; a pointer to it
 * is placed in the "_ftrace_eval_map" linker section so the tracing core
 * can resolve symbolic names in print formats at boot.
 */
#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)				\
	static struct trace_eval_map __used __initdata	\
	__##TRACE_SYSTEM##_##a =			\
	{						\
		.system = TRACE_SYSTEM_STRING,		\
		.eval_string = #a,			\
		.eval_value = a				\
	};						\
	static struct trace_eval_map __used		\
	__attribute__((section("_ftrace_eval_map")))	\
	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

/* Same mechanism, but the recorded value is sizeof(a), named "sizeof(a)". */
#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)				\
	static struct trace_eval_map __used __initdata	\
	__##TRACE_SYSTEM##_##a =			\
	{						\
		.system = TRACE_SYSTEM_STRING,		\
		.eval_string = "sizeof(" #a ")",	\
		.eval_value = sizeof(a)			\
	};						\
	static struct trace_eval_map __used		\
	__attribute__((section("_ftrace_eval_map")))	\
	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			     PARAMS(proto),			\
			     PARAMS(args),			\
			     PARAMS(tstruct),			\
			     PARAMS(assign),			\
			     PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

/* Stage 1: each field macro simply emits the matching struct member. */
#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __field_struct
#define __field_struct(type, item)	type	item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

/*
 * A dynamic array is represented in the record by a u32 "data loc":
 * offset of the payload in the low 16 bits, length in the high 16 bits
 * (encoded later, in the get_offsets stage).
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

/*
 * Emit the raw record structure for the event class, plus a forward
 * declaration of its trace_event_class (filled in at stage 4).
 * __data[0] marks the start of the variable-length payload.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct trace_event_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct trace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct trace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \

#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,	\
		assign, print, reg, unreg)			\
	TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
		PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)				\
	__TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)			\
	__TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct trace_event_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

/* All fixed-size field macros expand to nothing in this stage. */
#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/* Only dynamic arrays contribute an offset slot. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct trace_event_data_offsets_##call {			\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct trace_event_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/* Low 16 bits of the data loc: payload offset from the record start. */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

/* High 16 bits of the data loc: payload length in bytes. */
#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field)						\
	({								\
		void *__bitmask = __get_dynamic_array(field);		\
		unsigned int __bitmask_size;				\
		__bitmask_size = __get_dynamic_array_len(field);	\
		trace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
	})

/*
 * The print helpers below build a {-1, NULL}-terminated static table
 * from the variadic argument list and hand it to the matching
 * trace_print_*_seq() helper, using iter->tmp_seq (p) as scratch.
 */
#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		trace_print_flags_seq(p, delim, flag, __flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		trace_print_symbols_seq(p, value, symbols);		\
	})

/* On 32-bit, 64-bit flag values need the dedicated _u64 variants. */
#undef __print_flags_u64
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_flags_u64(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags_u64 __flags[] =	\
			{ flag_array, { -1, NULL } };			\
		trace_print_flags_seq_u64(p, delim, flag, __flags);	\
	})

#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		trace_print_symbols_seq_u64(p, value, symbols);		\
	})
#else
#define __print_flags_u64(flag, delim, flag_array...)			\
			__print_flags(flag, delim, flag_array)

#define __print_symbolic_u64(value, symbol_array...)			\
			__print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len)					\
	trace_print_hex_seq(p, buf, buf_len, false)

#undef __print_hex_str
#define __print_hex_str(buf, buf_len)					\
	trace_print_hex_seq(p, buf, buf_len, true)

#undef __print_array
#define __print_array(array, count, el_size)				\
	({								\
		BUILD_BUG_ON(el_size != 1 && el_size != 2 &&		\
			     el_size != 4 && el_size != 8);		\
		trace_print_array_seq(p, array, count, el_size);	\
	})
/*
 * Generate trace_raw_output_<call>(), the .trace callback that formats
 * one raw record into human-readable text via TP_printk, and the
 * trace_event_functions table that points at it.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
			struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct trace_event_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = trace_raw_output_prep(iter, trace_event);			\
	if (ret != TRACE_TYPE_HANDLED)					\
		return ret;						\
									\
	trace_seq_printf(s, print);					\
									\
	return trace_handle_return(s);					\
}									\
static struct trace_event_functions trace_event_type_funcs_##call = {	\
	.trace			= trace_raw_output_##call,		\
};

/*
 * DEFINE_EVENT_PRINT gets its own output function since its print
 * format differs from the template's; it type-checks the record
 * against event_<call> before formatting.
 */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
			 struct trace_event *event)			\
{									\
	struct trace_event_raw_##template *field;			\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	return trace_output_call(iter, #call, print);			\
}									\
static struct trace_event_functions trace_event_type_funcs_##call = {	\
	.trace			= trace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Next stage: generate trace_event_define_fields_<call>(), which
 * registers every field (name, type, offset, size, signedness, filter
 * type) with the filtering core via trace_define_field().
 */
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

/* Struct-valued fields are registered as unsigned (signedness 0). */
#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)			\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 0, filter_type);			\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	do {								\
		char *type_str = #type"["__stringify(len)"]";		\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		ret = trace_define_field(event_call, type_str, #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

/*
 * NOTE(review): ret is assigned by the field macros expanded from
 * tstruct; for an event class with no fields, "return ret" would read
 * an uninitialized value — presumably every class has at least one
 * field in practice. Kept as-is.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace __init						\
trace_event_define_fields_##call(struct trace_event_call *event_call)	\
{									\
	struct trace_event_raw_##call field;				\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/*
 * Pack offset (from the record start, low 16 bits) and length
 * (high 16 bits) into the per-item u32, and grow __data_size.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;

/* Strings reserve strlen+1 bytes; a NULL source stores "(null)". */
#undef __string
#define __string(item, src) __dynamic_array(char, item,			\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * num_possible_cpus().
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * num_possible_cpus() padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)				\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

/*
 * trace_event_get_offsets_<call>() computes the total dynamic payload
 * size for one invocation and fills in the stage-2 offsets struct.
 * "entry" is never dereferenced; it only anchors offsetof/typeof.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int trace_event_get_offsets_##call(		\
	struct trace_event_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	int __maybe_unused __item_length;				\
	struct trace_event_raw_##call __maybe_unused *entry;		\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *	struct trace_event_file *trace_file = __data;
 *	struct trace_event_call *event_call = trace_file->event_call;
 *	struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = trace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *			event_triggers_call(trace_file, NULL);
 *		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(trace_file, entry);
 *
 *	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *		     &trace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(trace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= trace_raw_output_<call>, <-- stage 2
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= trace_event_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= trace_event_raw_event_##call,
 *	.reg			= trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *	.class			= event_class_<template>,
 *	{
 *		.tp			= &__tracepoint_<call>,
 *	},
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // its only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

/* With perf enabled, forward-declare the perf probe and wire it in. */
#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

/* In the probe, a dynamic array stores its precomputed data loc. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* Copy the string into the reserved slot; NULL becomes "(null)". */
#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)					\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

/* perf-only annotations are transparent to the ftrace probe. */
#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

/*
 * Generate trace_event_raw_event_<call>(), the tracepoint probe:
 * size the record, reserve ring-buffer space, run the TP_fast_assign
 * body, then commit.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
trace_event_raw_event_##call(void *__data, proto)			\
{									\
	struct trace_event_file *trace_file = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_buffer fbuffer;				\
	struct trace_event_raw_##call *entry;				\
	int __data_size;						\
									\
	if (trace_trigger_soft_disabled(trace_file))			\
		return;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	entry = trace_event_buffer_reserve(&fbuffer, trace_file,	\
				 sizeof(*entry) + __data_size);		\
									\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	trace_event_buffer_commit(&fbuffer);				\
}
/*
 * The ftrace_test_probe is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the ftrace probe will
 * fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Final pass: TP_printk is re-expanded into the textual print_fmt
 * string exported to user space, so __entry becomes the literal token
 * REC and the stage-3 helper macros must be undefined.
 */
#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __print_hex_str
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

/* Emit print_fmt_<call> and the shared trace_event_class. */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static char print_fmt_##call[] = print;					\
static struct trace_event_class __used __refdata event_class_##call = { \
	.system			= TRACE_SYSTEM_STRING,			\
	.define_fields		= trace_event_define_fields_##call,	\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= trace_event_raw_event_##call,		\
	.reg			= trace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

/*
 * Emit the trace_event_call for one tracepoint, plus a pointer to it
 * in the "_ftrace_events" section so the core can collect all events
 * as a linker-built array.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &trace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

/* As above, but with a per-event print format and output functions. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static char print_fmt_##call[] = print;					\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &trace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)