/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE  128

DECLARE_RWSEM(trace_event_sem);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;
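/*
 * The *_msg_only() helpers below emit just the message payload of an
 * entry, with none of the usual comm/pid/cpu/timestamp context (e.g.
 * when only the printk-style text of the event is wanted).
 */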
enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct bputs_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_puts(s, field->str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct bprint_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_bprintf(s, field->fmt, field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct print_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_puts(s, field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
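/*
 * Decode @flags against @flag_array, emitting the names of all set
 * flags separated by @delim, followed by any unmatched remainder in
 * hex.  The decoded string is written NUL-terminated into @p and a
 * pointer to its start is returned, so the caller can print it with a
 * plain "%s".
 */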
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
                       unsigned long flags,
                       const struct trace_print_flags *flag_array)
{
        unsigned long mask;
        const char *str;
        const char *ret = trace_seq_buffer_ptr(p);
        int i, first = 1;

        for (i = 0; flag_array[i].name && flags; i++) {

                mask = flag_array[i].mask;
                if ((flags & mask) != mask)
                        continue;

                str = flag_array[i].name;
                flags &= ~mask;
                if (!first && delim)
                        trace_seq_puts(p, delim);
                else
                        first = 0;
                trace_seq_puts(p, str);
        }

        /* check for left over flags */
        if (flags) {
                if (!first && delim)
                        trace_seq_puts(p, delim);
                trace_seq_printf(p, "0x%lx", flags);
        }

        trace_seq_putc(p, 0);

        return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
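/*
 * Translate the scalar @val into the matching name from @symbol_array;
 * if no entry matches, fall back to printing the value in hex.  As
 * above, the result is NUL-terminated inside @p and a pointer to it is
 * returned.
 */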
const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                         const struct trace_print_flags *symbol_array)
{
        int i;
        const char *ret = trace_seq_buffer_ptr(p);

        for (i = 0; symbol_array[i].name; i++) {

                if (val != symbol_array[i].mask)
                        continue;

                trace_seq_puts(p, symbol_array[i].name);
                break;
        }

        if (ret == (const char *)(trace_seq_buffer_ptr(p)))
                trace_seq_printf(p, "0x%lx", val);

        trace_seq_putc(p, 0);

        return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
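/*
 * On 32-bit kernels an unsigned long is only 32 bits, so a u64 value
 * needs its own variant of the symbol lookup; on 64-bit builds the
 * function above already covers it.
 */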
#if BITS_PER_LONG == 32
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
                             const struct trace_print_flags_u64 *symbol_array)
{
        int i;
        const char *ret = trace_seq_buffer_ptr(p);

        for (i = 0; symbol_array[i].name; i++) {

                if (val != symbol_array[i].mask)
                        continue;

                trace_seq_puts(p, symbol_array[i].name);
                break;
        }

        if (ret == (const char *)(trace_seq_buffer_ptr(p)))
                trace_seq_printf(p, "0x%llx", val);

        trace_seq_putc(p, 0);

        return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif
const char *
ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
                         unsigned int bitmask_size)
{
        const char *ret = trace_seq_buffer_ptr(p);

        trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
        trace_seq_putc(p, 0);

        return ret;
}
EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);

const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
        int i;
        const char *ret = trace_seq_buffer_ptr(p);

        for (i = 0; i < buf_len; i++)
                trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

        trace_seq_putc(p, 0);

        return ret;
}
EXPORT_SYMBOL(ftrace_print_hex_seq);
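/*
 * Common prologue for event output: verify that the ring-buffer entry
 * really belongs to @trace_event, reset the scratch seq, and start the
 * line with the event name.
 */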
int ftrace_raw_output_prep(struct trace_iterator *iter,
                           struct trace_event *trace_event)
{
        struct ftrace_event_call *event;
        struct trace_seq *s = &iter->seq;
        struct trace_seq *p = &iter->tmp_seq;
        struct trace_entry *entry;
        int ret;

        event = container_of(trace_event, struct ftrace_event_call, event);
        entry = iter->ent;

        if (entry->type != event->event.type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        trace_seq_init(p);
        ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event));
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return 0;
}
EXPORT_SYMBOL(ftrace_raw_output_prep);
static int ftrace_output_raw(struct trace_iterator *iter, char *name,
                             char *fmt, va_list ap)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        ret = trace_seq_printf(s, "%s: ", name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_vprintf(s, fmt, ap);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = ftrace_output_raw(iter, name, fmt, ap);
        va_end(ap);

        return ret;
}
EXPORT_SYMBOL_GPL(ftrace_output_call);
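/*
 * The return address of a kretprobed function points into the kretprobe
 * trampoline rather than the real caller, so the looked-up symbol name
 * would be misleading; flag it explicitly instead.
 */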
#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
        static const char tramp_name[] = "kretprobe_trampoline";
        int size = sizeof(tramp_name);

        if (strncmp(tramp_name, name, size) == 0)
                return "[unknown/kretprobe'd]";
        return name;
}
#else
static inline const char *kretprobed(const char *name)
{
        return name;
}
#endif /* CONFIG_KRETPROBES */
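/*
 * Resolve a kernel address to a symbol name via kallsyms, either bare
 * (seq_print_sym_short) or including the offset into the symbol
 * (seq_print_sym_offset).  Without CONFIG_KALLSYMS nothing is printed
 * and success is returned.
 */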
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
        const char *name;

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        name = kretprobed(str);

        return trace_seq_printf(s, fmt, name);
#endif
        return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
                     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
        const char *name;

        sprint_symbol(str, address);
        name = kretprobed(str);

        return trace_seq_printf(s, fmt, name);
#endif
        return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif
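/*
 * Print a user-space address as "path_to_mapping[+offset]" by looking
 * up the VMA containing @ip in @mm; append (or fall back to) the raw
 * address when no backing file is found or SYM_ADDR is requested.
 */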
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
                      unsigned long ip, unsigned long sym_flags)
{
        struct file *file = NULL;
        unsigned long vmstart = 0;
        int ret = 1;

        if (s->full)
                return 0;

        if (mm) {
                const struct vm_area_struct *vma;

                down_read(&mm->mmap_sem);
                vma = find_vma(mm, ip);
                if (vma) {
                        file = vma->vm_file;
                        vmstart = vma->vm_start;
                }
                if (file) {
                        ret = trace_seq_path(s, &file->f_path);
                        if (ret)
                                ret = trace_seq_printf(s, "[+0x%lx]",
                                                       ip - vmstart);
                }
                up_read(&mm->mmap_sem);
        }
        if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
                      unsigned long sym_flags)
{
        struct mm_struct *mm = NULL;
        int ret = 1;
        unsigned int i;

        if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
                struct task_struct *task;
                /*
                 * we do the lookup on the thread group leader,
                 * since individual threads might have already quit!
                 */
                rcu_read_lock();
                task = find_task_by_vpid(entry->tgid);
                if (task)
                        mm = get_task_mm(task);
                rcu_read_unlock();
        }

        for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                unsigned long ip = entry->caller[i];

                if (ip == ULONG_MAX || !ret)
                        break;
                if (ret)
                        ret = trace_seq_puts(s, " => ");
                if (!ip) {
                        if (ret)
                                ret = trace_seq_puts(s, "??");
                        if (ret)
                                ret = trace_seq_putc(s, '\n');
                        continue;
                }
                if (!ret)
                        break;
                if (ret)
                        ret = seq_print_user_ip(s, mm, ip, sym_flags);
                ret = trace_seq_putc(s, '\n');
        }

        if (mm)
                mmput(mm);
        return ret;
}
int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
        int ret;

        if (!ip)
                return trace_seq_putc(s, '0');

        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                ret = seq_print_sym_offset(s, "%s", ip);
        else
                ret = seq_print_sym_short(s, "%s", ip);

        if (!ret)
                return 0;

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}
/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        char hardsoft_irq;
        char need_resched;
        char irqs_off;
        int hardirq;
        int softirq;
        int ret;

        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

        irqs_off =
                (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
                (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
                '.';

        switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
                                TRACE_FLAG_PREEMPT_RESCHED)) {
        case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
                need_resched = 'N';
                break;
        case TRACE_FLAG_NEED_RESCHED:
                need_resched = 'n';
                break;
        case TRACE_FLAG_PREEMPT_RESCHED:
                need_resched = 'p';
                break;
        default:
                need_resched = '.';
                break;
        }

        hardsoft_irq =
                (hardirq && softirq) ? 'H' :
                hardirq ? 'h' :
                softirq ? 's' :
                '.';

        if (!trace_seq_printf(s, "%c%c%c",
                              irqs_off, need_resched, hardsoft_irq))
                return 0;

        if (entry->preempt_count)
                ret = trace_seq_printf(s, "%x", entry->preempt_count);
        else
                ret = trace_seq_putc(s, '.');

        return ret;
}
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
        char comm[TASK_COMM_LEN];

        trace_find_cmdline(entry->pid, comm);

        if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
                              comm, entry->pid, cpu))
                return 0;

        return trace_print_lat_fmt(s, entry);
}
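/*
 * Print the latency-format timestamp.  In the non-verbose form a delay
 * of more than preempt_mark_thresh_us before the next entry is flagged
 * with '!', anything above 1us with '+', to make long gaps easy to
 * spot.
 */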
static unsigned long preempt_mark_thresh_us = 100;

static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
        unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
        unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
        unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
        unsigned long long rel_ts = next_ts - iter->ts;
        struct trace_seq *s = &iter->seq;

        if (in_ns) {
                abs_ts = ns2usecs(abs_ts);
                rel_ts = ns2usecs(rel_ts);
        }

        if (verbose && in_ns) {
                unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
                unsigned long abs_msec = (unsigned long)abs_ts;
                unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
                unsigned long rel_msec = (unsigned long)rel_ts;

                return trace_seq_printf(
                                s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
                                ns2usecs(iter->ts),
                                abs_msec, abs_usec,
                                rel_msec, rel_usec);
        } else if (verbose && !in_ns) {
                return trace_seq_printf(
                                s, "[%016llx] %lld (+%lld): ",
                                iter->ts, abs_ts, rel_ts);
        } else if (!verbose && in_ns) {
                return trace_seq_printf(
                                s, " %4lldus%c: ",
                                abs_ts,
                                rel_ts > preempt_mark_thresh_us ? '!' :
                                rel_ts > 1 ? '+' : ' ');
        } else { /* !verbose && !in_ns */
                return trace_seq_printf(s, " %4lld: ", abs_ts);
        }
}
int trace_print_context(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        unsigned long long t;
        unsigned long secs, usec_rem;
        char comm[TASK_COMM_LEN];
        int ret;

        trace_find_cmdline(entry->pid, comm);

        ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
                               comm, entry->pid, iter->cpu);
        if (!ret)
                return 0;

        if (trace_flags & TRACE_ITER_IRQ_INFO) {
                ret = trace_print_lat_fmt(s, entry);
                if (!ret)
                        return 0;
        }

        if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
                t = ns2usecs(iter->ts);
                usec_rem = do_div(t, USEC_PER_SEC);
                secs = (unsigned long)t;
                return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
        } else
                return trace_seq_printf(s, " %12llu: ", iter->ts);
}
int trace_print_lat_context(struct trace_iterator *iter)
{
        u64 next_ts;
        int ret;
        /* trace_find_next_entry will reset ent_size */
        int ent_size = iter->ent_size;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent,
                           *next_entry = trace_find_next_entry(iter, NULL,
                                                               &next_ts);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);

        /* Restore the original ent_size */
        iter->ent_size = ent_size;

        if (!next_entry)
                next_ts = iter->ts;

        if (verbose) {
                char comm[TASK_COMM_LEN];

                trace_find_cmdline(entry->pid, comm);

                ret = trace_seq_printf(
                                s, "%16s %5d %3d %d %08x %08lx ",
                                comm, entry->pid, iter->cpu, entry->flags,
                                entry->preempt_count, iter->idx);
        } else {
                ret = lat_print_generic(s, entry, iter->cpu);
        }

        if (ret)
                ret = lat_print_timestamp(iter, next_ts);

        return ret;
}
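/*
 * Map a task state bitmask to its single-letter code from
 * TASK_STATE_TO_CHAR_STR (index 0 is running, then one letter per
 * state bit); states beyond the known set print as '?'.
 */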
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
        int bit = state ? __ffs(state) + 1 : 0;

        return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
        struct trace_event *event;
        unsigned key;

        key = type & (EVENT_HASHSIZE - 1);

        hlist_for_each_entry(event, &event_hash[key], node) {
                if (event->type == type)
                        return event;
        }

        return NULL;
}
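/*
 * Find the lowest free dynamic type number by walking the type-ordered
 * list of registered events, and point *list at the position the new
 * event should be spliced in at.  Returns 0 when every dynamic type is
 * in use.
 */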
static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
        struct trace_event *e;
        int last = __TRACE_LAST_TYPE;

        if (list_empty(&ftrace_event_list)) {
                *list = &ftrace_event_list;
                return last + 1;
        }

        /*
         * We used up all possible max events,
         * let's see if somebody freed one.
         */
        list_for_each_entry(e, &ftrace_event_list, list) {
                if (e->type != last + 1)
                        break;
                last++;
        }

        /* Did we use up all 65 thousand events??? */
        if ((last + 1) > FTRACE_MAX_EVENT)
                return 0;

        *list = &e->list;
        return last + 1;
}
void trace_event_read_lock(void)
{
        down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
        up_read(&trace_event_sem);
}
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
        unsigned key;
        int ret = 0;

        down_write(&trace_event_sem);

        if (WARN_ON(!event))
                goto out;

        if (WARN_ON(!event->funcs))
                goto out;

        INIT_LIST_HEAD(&event->list);

        if (!event->type) {
                struct list_head *list = NULL;

                if (next_event_type > FTRACE_MAX_EVENT) {

                        event->type = trace_search_list(&list);
                        if (!event->type)
                                goto out;

                } else {

                        event->type = next_event_type++;
                        list = &ftrace_event_list;
                }

                if (WARN_ON(ftrace_find_event(event->type)))
                        goto out;

                list_add_tail(&event->list, list);

        } else if (event->type > __TRACE_LAST_TYPE) {
                printk(KERN_WARNING "Need to add type to trace.h\n");
                WARN_ON(1);
                goto out;
        } else {
                /* Is this event already used */
                if (ftrace_find_event(event->type))
                        goto out;
        }

        if (event->funcs->trace == NULL)
                event->funcs->trace = trace_nop_print;
        if (event->funcs->raw == NULL)
                event->funcs->raw = trace_nop_print;
        if (event->funcs->hex == NULL)
                event->funcs->hex = trace_nop_print;
        if (event->funcs->binary == NULL)
                event->funcs->binary = trace_nop_print;

        key = event->type & (EVENT_HASHSIZE - 1);

        hlist_add_head(&event->node, &event_hash[key]);

        ret = event->type;
 out:
        up_write(&trace_event_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
        hlist_del(&event->node);
        list_del(&event->list);
        return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
        down_write(&trace_event_sem);
        __unregister_ftrace_event(event);
        up_write(&trace_event_sem);

        return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);
/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
                                  struct trace_event *event)
{
        if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
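/*
 * Each builtin entry type below registers a trace_event whose funcs
 * provide the output styles the iterator can ask for: human-readable
 * (.trace), raw numbers (.raw), hex (.hex) and binary (.binary).
 */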
/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
                                        struct trace_event *event)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
                if (!trace_seq_puts(s, " <-"))
                        goto partial;
                if (!seq_print_ip_sym(s,
                                      field->parent_ip,
                                      flags))
                        goto partial;
        }
        if (!trace_seq_putc(s, '\n'))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
                                      struct trace_event *event)
{
        struct ftrace_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
                              field->ip,
                              field->parent_ip))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
                                      struct trace_event *event)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_HEX_FIELD_RET(s, field->ip);
        SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
                                      struct trace_event *event)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->ip);
        SEQ_PUT_FIELD_RET(s, field->parent_ip);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_fn_funcs = {
        .trace          = trace_fn_trace,
        .raw            = trace_fn_raw,
        .hex            = trace_fn_hex,
        .binary         = trace_fn_bin,
};

static struct trace_event trace_fn_event = {
        .type           = TRACE_FN,
        .funcs          = &trace_fn_funcs,
};
/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
                                             char *delim)
{
        struct ctx_switch_entry *field;
        char comm[TASK_COMM_LEN];
        int S, T;

        trace_assign_type(field, iter->ent);

        T = task_state_char(field->next_state);
        S = task_state_char(field->prev_state);
        trace_find_cmdline(field->next_pid, comm);
        if (!trace_seq_printf(&iter->seq,
                              " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
                              field->prev_pid,
                              field->prev_prio,
                              S, delim,
                              field->next_cpu,
                              field->next_pid,
                              field->next_prio,
                              T, comm))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
                                         struct trace_event *event)
{
        return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
                                          int flags, struct trace_event *event)
{
        return trace_ctxwake_print(iter, "  +");
}
static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
        struct ctx_switch_entry *field;
        int T;

        trace_assign_type(field, iter->ent);

        if (!S)
                S = task_state_char(field->prev_state);
        T = task_state_char(field->next_state);
        if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
                              field->prev_pid,
                              field->prev_prio,
                              S,
                              field->next_cpu,
                              field->next_pid,
                              field->next_prio,
                              T))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
                                       struct trace_event *event)
{
        return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
                                        struct trace_event *event)
{
        return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
        struct ctx_switch_entry *field;
        struct trace_seq *s = &iter->seq;
        int T;

        trace_assign_type(field, iter->ent);

        if (!S)
                S = task_state_char(field->prev_state);
        T = task_state_char(field->next_state);

        SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
        SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
        SEQ_PUT_HEX_FIELD_RET(s, S);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
        SEQ_PUT_HEX_FIELD_RET(s, T);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
                                       struct trace_event *event)
{
        return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
                                        struct trace_event *event)
{
        return trace_ctxwake_hex(iter, '+');
}
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
                                           int flags, struct trace_event *event)
{
        struct ctx_switch_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->prev_pid);
        SEQ_PUT_FIELD_RET(s, field->prev_prio);
        SEQ_PUT_FIELD_RET(s, field->prev_state);
        SEQ_PUT_FIELD_RET(s, field->next_pid);
        SEQ_PUT_FIELD_RET(s, field->next_prio);
        SEQ_PUT_FIELD_RET(s, field->next_state);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_ctx_funcs = {
        .trace          = trace_ctx_print,
        .raw            = trace_ctx_raw,
        .hex            = trace_ctx_hex,
        .binary         = trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
        .type           = TRACE_CTX,
        .funcs          = &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
        .trace          = trace_wake_print,
        .raw            = trace_wake_raw,
        .hex            = trace_wake_hex,
        .binary         = trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
        .type           = TRACE_WAKE,
        .funcs          = &trace_wake_funcs,
};
/* TRACE_STACK */
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
                                           int flags, struct trace_event *event)
{
        struct stack_entry *field;
        struct trace_seq *s = &iter->seq;
        unsigned long *p;
        unsigned long *end;

        trace_assign_type(field, iter->ent);
        end = (unsigned long *)((long)iter->ent + iter->ent_size);

        if (!trace_seq_puts(s, "<stack trace>\n"))
                goto partial;

        for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
                if (!trace_seq_puts(s, " => "))
                        goto partial;

                if (!seq_print_ip_sym(s, *p, flags))
                        goto partial;

                if (!trace_seq_putc(s, '\n'))
                        goto partial;
        }

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_stack_funcs = {
        .trace          = trace_stack_print,
};

static struct trace_event trace_stack_event = {
        .type           = TRACE_STACK,
        .funcs          = &trace_stack_funcs,
};
/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
                                                int flags, struct trace_event *event)
{
        struct userstack_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_puts(s, "<user stack trace>\n"))
                goto partial;

        if (!seq_print_userip_objs(field, s, flags))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_user_stack_funcs = {
        .trace          = trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
        .type           = TRACE_USER_STACK,
        .funcs          = &trace_user_stack_funcs,
};
/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
                  struct trace_event *event)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct bputs_entry *field;

        trace_assign_type(field, entry);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if (!trace_seq_puts(s, ": "))
                goto partial;

        if (!trace_seq_puts(s, field->str))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
                struct trace_event *event)
{
        struct bputs_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(s, ": %lx : ", field->ip))
                goto partial;

        if (!trace_seq_puts(s, field->str))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bputs_funcs = {
        .trace          = trace_bputs_print,
        .raw            = trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
        .type           = TRACE_BPUTS,
        .funcs          = &trace_bputs_funcs,
};
/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct bprint_entry *field;

        trace_assign_type(field, entry);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if (!trace_seq_puts(s, ": "))
                goto partial;

        if (!trace_seq_bprintf(s, field->fmt, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
                 struct trace_event *event)
{
        struct bprint_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(s, ": %lx : ", field->ip))
                goto partial;

        if (!trace_seq_bprintf(s, field->fmt, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bprint_funcs = {
        .trace          = trace_bprint_print,
        .raw            = trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
        .type           = TRACE_BPRINT,
        .funcs          = &trace_bprint_funcs,
};
/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
                                           int flags, struct trace_event *event)
{
        struct print_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if (!trace_seq_printf(s, ": %s", field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
                                         struct trace_event *event)
{
        struct print_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_print_funcs = {
        .trace          = trace_print_print,
        .raw            = trace_print_raw,
};

static struct trace_event trace_print_event = {
        .type           = TRACE_PRINT,
        .funcs          = &trace_print_funcs,
};
static struct trace_event *events[] __initdata = {
        &trace_fn_event,
        &trace_ctx_event,
        &trace_wake_event,
        &trace_stack_event,
        &trace_user_stack_event,
        &trace_bputs_event,
        &trace_bprint_event,
        &trace_print_event,
        NULL
};

__init static int init_events(void)
{
        struct trace_event *event;
        int i, ret;

        for (i = 0; events[i]; i++) {
                event = events[i];

                ret = register_ftrace_event(event);
                if (!ret) {
                        printk(KERN_WARNING "event %d failed to register\n",
                               event->type);
                        WARN_ON_ONCE(1);
                }
        }

        return 0;
}
early_initcall(init_events);