trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
    pid_t           last_pid;
    int             depth;
    int             depth_irq;
    int             ignore;
    unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
    struct fgraph_cpu_data __percpu *cpu_data;

    /* Place to preserve last processed entry. */
    struct ftrace_graph_ent_entry   ent;
    struct ftrace_graph_ret_entry   ret;
    int                             failed;
    int                             cpu;
};

#define TRACE_GRAPH_INDENT  2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN   0x1
#define TRACE_GRAPH_PRINT_CPU       0x2
#define TRACE_GRAPH_PRINT_OVERHEAD  0x4
#define TRACE_GRAPH_PRINT_PROC      0x8
#define TRACE_GRAPH_PRINT_DURATION  0x10
#define TRACE_GRAPH_PRINT_ABS_TIME  0x20
#define TRACE_GRAPH_PRINT_IRQS      0x40

static struct tracer_opt trace_opts[] = {
    /* Display overruns? (for self-debug purposes) */
    { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
    /* Display CPU? */
    { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
    /* Display overhead? */
    { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
    /* Display proc name/pid */
    { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
    /* Display duration of execution */
    { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
    /* Display absolute time of an entry */
    { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
    /* Display interrupts */
    { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
    { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
    /* Don't display overruns and proc by default */
    .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
           TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
    .opts = trace_opts
};
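/*
 * Each option above maps to a writable flag in the trace_options file of
 * the tracing debugfs directory (typically mounted at /sys/kernel/debug),
 * e.g.:
 *
 *   # echo funcgraph-proc   > /sys/kernel/debug/tracing/trace_options
 *   # echo nofuncgraph-proc > /sys/kernel/debug/tracing/trace_options
 *
 * Toggling funcgraph-irqs lands in func_graph_set_flag() at the bottom of
 * this file, which flips ftrace_graph_skip_irqs.
 */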
static struct trace_array *graph_array;

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
             unsigned long frame_pointer)
{
    unsigned long long calltime;
    int index;

    if (!current->ret_stack)
        return -EBUSY;

    /*
     * We must make sure the ret_stack is tested before we read
     * anything else.
     */
    smp_rmb();

    /* The return trace stack is full */
    if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
        atomic_inc(&current->trace_overrun);
        return -EBUSY;
    }

    calltime = trace_clock_local();

    index = ++current->curr_ret_stack;
    barrier();
    current->ret_stack[index].ret = ret;
    current->ret_stack[index].func = func;
    current->ret_stack[index].calltime = calltime;
    current->ret_stack[index].subtime = 0;
    current->ret_stack[index].fp = frame_pointer;
    *depth = index;

    return 0;
}
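/*
 * Big picture (a sketch; the entry hook is arch-specific): at function
 * entry the arch trampoline calls prepare_ftrace_return(), which uses
 * ftrace_push_return_trace() above to save the real return address, then
 * rewrites the on-stack return address to point at return_to_handler.
 * When the traced function returns, it lands in that trampoline instead,
 * which calls ftrace_return_to_handler() below to emit the exit event and
 * hand back the original return address.
 */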
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
            unsigned long frame_pointer)
{
    int index;

    index = current->curr_ret_stack;

    if (unlikely(index < 0)) {
        ftrace_graph_stop();
        WARN_ON(1);
        /* Might as well panic, otherwise we have nowhere to go */
        *ret = (unsigned long)panic;
        return;
    }

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
    /*
     * The arch may choose to record the frame pointer used
     * and check it here to make sure that it is what we expect it
     * to be. If gcc does not set the place holder of the return
     * address in the frame pointer, and does a copy instead, then
     * the function graph trace will fail. This test detects this
     * case.
     *
     * Currently, x86_32 with optimize for size (-Os) makes the latest
     * gcc do the above.
     */
    if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
        ftrace_graph_stop();
        WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
             "  from func %ps return to %lx\n",
             current->ret_stack[index].fp,
             frame_pointer,
             (void *)current->ret_stack[index].func,
             current->ret_stack[index].ret);
        *ret = (unsigned long)panic;
        return;
    }
#endif

    *ret = current->ret_stack[index].ret;
    trace->func = current->ret_stack[index].func;
    trace->calltime = current->ret_stack[index].calltime;
    trace->overrun = atomic_read(&current->trace_overrun);
    trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
    struct ftrace_graph_ret trace;
    unsigned long ret;

    ftrace_pop_return_trace(&trace, &ret, frame_pointer);
    trace.rettime = trace_clock_local();
    ftrace_graph_return(&trace);
    barrier();
    current->curr_ret_stack--;

    if (unlikely(!ret)) {
        ftrace_graph_stop();
        WARN_ON(1);
        /* Might as well panic. What else to do? */
        ret = (unsigned long)panic;
    }

    return ret;
}
int __trace_graph_entry(struct trace_array *tr,
            struct ftrace_graph_ent *trace,
            unsigned long flags,
            int pc)
{
    struct ftrace_event_call *call = &event_funcgraph_entry;
    struct ring_buffer_event *event;
    struct ring_buffer *buffer = tr->buffer;
    struct ftrace_graph_ent_entry *entry;

    if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
        return 0;

    event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                      sizeof(*entry), flags, pc);
    if (!event)
        return 0;
    entry = ring_buffer_event_data(event);
    entry->graph_ent = *trace;
    if (!filter_current_check_discard(buffer, call, entry, event))
        ring_buffer_unlock_commit(buffer, event);

    return 1;
}
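/*
 * Note the reserve/commit pattern used above and in __trace_graph_return():
 * trace_buffer_lock_reserve() hands out space directly inside the ring
 * buffer, the entry is filled in place, and filter_current_check_discard()
 * drops the event instead of committing it when the current event filter
 * rejects it.
 */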
static inline int ftrace_graph_ignore_irqs(void)
{
    if (!ftrace_graph_skip_irqs)
        return 0;

    return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
    struct trace_array *tr = graph_array;
    struct trace_array_cpu *data;
    unsigned long flags;
    long disabled;
    int ret;
    int cpu;
    int pc;

    if (!ftrace_trace_task(current))
        return 0;

    /* Trace it when it is nested in an enabled function, or is one itself. */
    if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
        ftrace_graph_ignore_irqs())
        return 0;

    local_irq_save(flags);
    cpu = raw_smp_processor_id();
    data = tr->data[cpu];
    disabled = atomic_inc_return(&data->disabled);
    if (likely(disabled == 1)) {
        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
    } else {
        ret = 0;
    }

    atomic_dec(&data->disabled);
    local_irq_restore(flags);

    return ret;
}

int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
    if (tracing_thresh)
        return 1;
    else
        return trace_graph_entry(trace);
}
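/*
 * With tracing_thresh set, nothing is filtered at entry time: returning 1
 * arms the return hook without emitting an entry event, and
 * trace_graph_thresh_return() below records only those functions whose
 * measured duration (rettime - calltime) reaches the threshold.
 */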
void __trace_graph_return(struct trace_array *tr,
            struct ftrace_graph_ret *trace,
            unsigned long flags,
            int pc)
{
    struct ftrace_event_call *call = &event_funcgraph_exit;
    struct ring_buffer_event *event;
    struct ring_buffer *buffer = tr->buffer;
    struct ftrace_graph_ret_entry *entry;

    if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
        return;

    event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                      sizeof(*entry), flags, pc);
    if (!event)
        return;
    entry = ring_buffer_event_data(event);
    entry->ret = *trace;
    if (!filter_current_check_discard(buffer, call, entry, event))
        ring_buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
    struct trace_array *tr = graph_array;
    struct trace_array_cpu *data;
    unsigned long flags;
    long disabled;
    int cpu;
    int pc;

    local_irq_save(flags);
    cpu = raw_smp_processor_id();
    data = tr->data[cpu];
    disabled = atomic_inc_return(&data->disabled);
    if (likely(disabled == 1)) {
        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
    }
    atomic_dec(&data->disabled);
    local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
    graph_array = tr;

    /* Make graph_array visible before we start tracing */
    smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
    if (tracing_thresh &&
        (trace->rettime - trace->calltime < tracing_thresh))
        return;
    else
        trace_graph_return(trace);
}
static int graph_trace_init(struct trace_array *tr)
{
    int ret;

    set_graph_array(tr);
    if (tracing_thresh)
        ret = register_ftrace_graph(&trace_graph_thresh_return,
                        &trace_graph_thresh_entry);
    else
        ret = register_ftrace_graph(&trace_graph_return,
                        &trace_graph_entry);
    if (ret)
        return ret;
    tracing_start_cmdline_record();

    return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
    tracing_stop_cmdline_record();
    unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
    int ret;

    /*
     * Start with a space character - to make it stand out
     * to the right a bit when trace output is pasted into
     * email:
     */
    ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH 14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
    char comm[TASK_COMM_LEN];
    /* sign + log10(MAX_INT) + '\0' */
    char pid_str[11];
    int spaces = 0;
    int ret;
    int len;
    int i;

    trace_find_cmdline(pid, comm);
    comm[7] = '\0';
    sprintf(pid_str, "%d", pid);

    /* 1 stands for the "-" character */
    len = strlen(comm) + strlen(pid_str) + 1;

    if (len < TRACE_GRAPH_PROCINFO_LENGTH)
        spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

    /* First spaces to align center */
    for (i = 0; i < spaces / 2; i++) {
        ret = trace_seq_printf(s, " ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    /* Last spaces to align center */
    for (i = 0; i < spaces - (spaces / 2); i++) {
        ret = trace_seq_printf(s, " ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }
    return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
    if (!trace_seq_putc(s, ' '))
        return 0;

    return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
    pid_t prev_pid;
    pid_t *last_pid;
    int ret;

    if (!data)
        return TRACE_TYPE_HANDLED;

    last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

    if (*last_pid == pid)
        return TRACE_TYPE_HANDLED;

    prev_pid = *last_pid;
    *last_pid = pid;

    if (prev_pid == -1)
        return TRACE_TYPE_HANDLED;
    /*
     * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

     */
    ret = trace_seq_printf(s,
        " ------------------------------------------\n");
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    ret = print_graph_cpu(s, cpu);
    if (ret == TRACE_TYPE_PARTIAL_LINE)
        return TRACE_TYPE_PARTIAL_LINE;

    ret = print_graph_proc(s, prev_pid);
    if (ret == TRACE_TYPE_PARTIAL_LINE)
        return TRACE_TYPE_PARTIAL_LINE;

    ret = trace_seq_printf(s, " => ");
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    ret = print_graph_proc(s, pid);
    if (ret == TRACE_TYPE_PARTIAL_LINE)
        return TRACE_TYPE_PARTIAL_LINE;

    ret = trace_seq_printf(s,
        "\n ------------------------------------------\n\n");
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
        struct ftrace_graph_ent_entry *curr)
{
    struct fgraph_data *data = iter->private;
    struct ring_buffer_iter *ring_iter = NULL;
    struct ring_buffer_event *event;
    struct ftrace_graph_ret_entry *next;

    /*
     * If the previous output failed to write to the seq buffer,
     * then we just reuse the data from before.
     */
    if (data && data->failed) {
        curr = &data->ent;
        next = &data->ret;
    } else {

        ring_iter = iter->buffer_iter[iter->cpu];

        /* First peek to compare current entry and the next one */
        if (ring_iter)
            event = ring_buffer_iter_peek(ring_iter, NULL);
        else {
            /*
             * We need to consume the current entry to see
             * the next one.
             */
            ring_buffer_consume(iter->tr->buffer, iter->cpu,
                        NULL, NULL);
            event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
                         NULL, NULL);
        }

        if (!event)
            return NULL;

        next = ring_buffer_event_data(event);

        if (data) {
            /*
             * Save current and next entries for later reference
             * if the output fails.
             */
            data->ent = *curr;
            /*
             * If the next event is not a return type, then
             * we only care about what type it is. Otherwise we can
             * safely copy the entire event.
             */
            if (next->ent.type == TRACE_GRAPH_RET)
                data->ret = *next;
            else
                data->ret.ent.type = next->ent.type;
        }
    }

    if (next->ent.type != TRACE_GRAPH_RET)
        return NULL;

    if (curr->ent.pid != next->ent.pid ||
        curr->graph_ent.func != next->ret.func)
        return NULL;

    /* this is a leaf, now advance the iterator */
    if (ring_iter)
        ring_buffer_read(ring_iter, NULL);

    return next;
}
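/*
 * Leaf folding, illustrated: when an entry event is immediately followed by
 * its matching return event (same pid, same function), the pair is printed
 * as a single closed call rather than an empty brace block. Roughly:
 *
 *  1)   0.527 us    |        rcu_read_lock();
 *
 * instead of
 *
 *  1)               |        rcu_read_lock() {
 *  1)   0.527 us    |        }
 */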
/* Signal an overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s,
             u32 flags)
{
    /* If the duration column is not displayed, we don't need anything */
    if (!(flags & TRACE_GRAPH_PRINT_DURATION))
        return 1;

    /* Non nested entry or return */
    if (duration == -1)
        return trace_seq_printf(s, "  ");

    if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
        /* Duration exceeded 100 usecs */
        if (duration > 100000ULL)
            return trace_seq_printf(s, "! ");

        /* Duration exceeded 10 usecs */
        if (duration > 10000ULL)
            return trace_seq_printf(s, "+ ");
    }

    return trace_seq_printf(s, "  ");
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
    unsigned long usecs_rem;

    usecs_rem = do_div(t, NSEC_PER_SEC);
    usecs_rem /= 1000;

    return trace_seq_printf(s, "%5lu.%06lu |  ",
            (unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
        enum trace_type type, int cpu, pid_t pid, u32 flags)
{
    int ret;
    struct trace_seq *s = &iter->seq;

    if (addr < (unsigned long)__irqentry_text_start ||
        addr >= (unsigned long)__irqentry_text_end)
        return TRACE_TYPE_UNHANDLED;

    /* Absolute time */
    if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
        ret = print_graph_abs_time(iter->ts, s);
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Cpu */
    if (flags & TRACE_GRAPH_PRINT_CPU) {
        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Proc */
    if (flags & TRACE_GRAPH_PRINT_PROC) {
        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
            return TRACE_TYPE_PARTIAL_LINE;
        ret = trace_seq_printf(s, " | ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* No overhead */
    ret = print_graph_overhead(-1, s, flags);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    if (type == TRACE_GRAPH_ENT)
        ret = trace_seq_printf(s, "==========>");
    else
        ret = trace_seq_printf(s, "<==========");

    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    /* Don't close the duration column if there isn't one */
    if (flags & TRACE_GRAPH_PRINT_DURATION)
        trace_seq_printf(s, " |");
    ret = trace_seq_printf(s, "\n");

    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;
    return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
    unsigned long nsecs_rem = do_div(duration, 1000);
    /* log10(ULONG_MAX) + '\0' */
    char msecs_str[21];
    char nsecs_str[5];
    int ret, len;
    int i;

    sprintf(msecs_str, "%lu", (unsigned long) duration);

    /* Print msecs */
    ret = trace_seq_printf(s, "%s", msecs_str);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    len = strlen(msecs_str);

    /* Print nsecs (we don't want to exceed 7 digits) */
    if (len < 7) {
        size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

        snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
        ret = trace_seq_printf(s, ".%s", nsecs_str);
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
        len += strlen(nsecs_str);
    }

    ret = trace_seq_printf(s, " us ");
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    /* Print remaining spaces to fit the row's width */
    for (i = len; i < 7; i++) {
        ret = trace_seq_printf(s, " ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }
    return TRACE_TYPE_HANDLED;
}
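/*
 * Formatting example: a duration of 3141592 ns is divided down to 3141 us
 * with a 592 ns remainder, and comes out as "3141.592 us "; once the
 * microsecond part reaches seven digits, the fractional nanoseconds are
 * dropped so the column stays aligned. (Despite the msecs_str name, the
 * integer part holds microseconds.)
 */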
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
    int ret;

    ret = trace_print_graph_duration(duration, s);
    if (ret != TRACE_TYPE_HANDLED)
        return ret;

    ret = trace_seq_printf(s, "|  ");
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
        struct ftrace_graph_ent_entry *entry,
        struct ftrace_graph_ret_entry *ret_entry,
        struct trace_seq *s, u32 flags)
{
    struct fgraph_data *data = iter->private;
    struct ftrace_graph_ret *graph_ret;
    struct ftrace_graph_ent *call;
    unsigned long long duration;
    int ret;
    int i;

    graph_ret = &ret_entry->ret;
    call = &entry->graph_ent;
    duration = graph_ret->rettime - graph_ret->calltime;

    if (data) {
        struct fgraph_cpu_data *cpu_data;
        int cpu = iter->cpu;

        cpu_data = per_cpu_ptr(data->cpu_data, cpu);

        /*
         * Comments display at + 1 to depth. Since
         * this is a leaf function, keep the comments
         * equal to this depth.
         */
        cpu_data->depth = call->depth - 1;

        /* No need to keep this function around for this depth */
        if (call->depth < FTRACE_RETFUNC_DEPTH)
            cpu_data->enter_funcs[call->depth] = 0;
    }

    /* Overhead */
    ret = print_graph_overhead(duration, s, flags);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    /* Duration */
    if (flags & TRACE_GRAPH_PRINT_DURATION) {
        ret = print_graph_duration(duration, s);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Function */
    for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
        ret = trace_seq_printf(s, " ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
             struct ftrace_graph_ent_entry *entry,
             struct trace_seq *s, int cpu, u32 flags)
{
    struct ftrace_graph_ent *call = &entry->graph_ent;
    struct fgraph_data *data = iter->private;
    int ret;
    int i;

    if (data) {
        struct fgraph_cpu_data *cpu_data;
        int cpu = iter->cpu;

        cpu_data = per_cpu_ptr(data->cpu_data, cpu);
        cpu_data->depth = call->depth;

        /* Save this function pointer to see if the exit matches */
        if (call->depth < FTRACE_RETFUNC_DEPTH)
            cpu_data->enter_funcs[call->depth] = call->func;
    }

    /* No overhead */
    ret = print_graph_overhead(-1, s, flags);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    /* No time */
    if (flags & TRACE_GRAPH_PRINT_DURATION) {
        ret = trace_seq_printf(s, "            |  ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Function */
    for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
        ret = trace_seq_printf(s, " ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    /*
     * We already consumed the current entry to check the next one
     * and see if this is a leaf.
     */
    return TRACE_TYPE_NO_CONSUME;
}
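/*
 * Indentation recap: TRACE_GRAPH_INDENT is 2, so a call at depth 3 is
 * preceded by 6 spaces and the output reads as a two-space-per-level tree,
 * with leaves folded to "func();" and non-leaves opened with "func() {".
 */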
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
             int type, unsigned long addr, u32 flags)
{
    struct fgraph_data *data = iter->private;
    struct trace_entry *ent = iter->ent;
    int cpu = iter->cpu;
    int ret;

    /* Pid */
    if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
        return TRACE_TYPE_PARTIAL_LINE;

    if (type) {
        /* Interrupt */
        ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Absolute time */
    if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
        ret = print_graph_abs_time(iter->ts, s);
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Cpu */
    if (flags & TRACE_GRAPH_PRINT_CPU) {
        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Proc */
    if (flags & TRACE_GRAPH_PRINT_PROC) {
        ret = print_graph_proc(s, ent->pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
            return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " | ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Latency format */
    if (trace_flags & TRACE_ITER_LATENCY_FMT) {
        ret = print_graph_lat_fmt(s, ent);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
        unsigned long addr, int depth)
{
    int cpu = iter->cpu;
    int *depth_irq;
    struct fgraph_data *data = iter->private;

    /*
     * If we are either displaying irqs, or we got called as
     * a graph event and private data does not exist,
     * then we bypass the irq check.
     */
    if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
        (!data))
        return 0;

    depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

    /*
     * We are inside the irq code
     */
    if (*depth_irq >= 0)
        return 1;

    if ((addr < (unsigned long)__irqentry_text_start) ||
        (addr >= (unsigned long)__irqentry_text_end))
        return 0;

    /*
     * We are entering irq code.
     */
    *depth_irq = depth;
    return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
    int cpu = iter->cpu;
    int *depth_irq;
    struct fgraph_data *data = iter->private;

    /*
     * If we are either displaying irqs, or we got called as
     * a graph event and private data does not exist,
     * then we bypass the irq check.
     */
    if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
        (!data))
        return 0;

    depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

    /*
     * We are not inside the irq code.
     */
    if (*depth_irq == -1)
        return 0;

    /*
     * We are inside the irq code, and this is the returning entry.
     * Let's not trace it and clear the entry depth, since
     * we are out of irq code.
     *
     * This condition ensures that we 'leave the irq code' once
     * we are out of the entry depth. Thus protecting us from
     * the RETURN entry loss.
     */
    if (*depth_irq >= depth) {
        *depth_irq = -1;
        return 1;
    }

    /*
     * We are inside the irq code, and this is not the entry.
     */
    return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
          struct trace_iterator *iter, u32 flags)
{
    struct fgraph_data *data = iter->private;
    struct ftrace_graph_ent *call = &field->graph_ent;
    struct ftrace_graph_ret_entry *leaf_ret;
    static enum print_line_t ret;
    int cpu = iter->cpu;

    if (check_irq_entry(iter, flags, call->func, call->depth))
        return TRACE_TYPE_HANDLED;

    if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
        return TRACE_TYPE_PARTIAL_LINE;

    leaf_ret = get_return_for_leaf(iter, field);
    if (leaf_ret)
        ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
    else
        ret = print_graph_entry_nested(iter, field, s, cpu, flags);

    if (data) {
        /*
         * If we failed to write our output, then we need to make
         * note of it. Because we already consumed our entry.
         */
        if (s->full) {
            data->failed = 1;
            data->cpu = cpu;
        } else
            data->failed = 0;
    }

    return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
           struct trace_entry *ent, struct trace_iterator *iter,
           u32 flags)
{
    unsigned long long duration = trace->rettime - trace->calltime;
    struct fgraph_data *data = iter->private;
    pid_t pid = ent->pid;
    int cpu = iter->cpu;
    int func_match = 1;
    int ret;
    int i;

    if (check_irq_return(iter, flags, trace->depth))
        return TRACE_TYPE_HANDLED;

    if (data) {
        struct fgraph_cpu_data *cpu_data;
        int cpu = iter->cpu;

        cpu_data = per_cpu_ptr(data->cpu_data, cpu);

        /*
         * Comments display at + 1 to depth. This is the
         * return from a function, we now want the comments
         * to display at the same level of the bracket.
         */
        cpu_data->depth = trace->depth - 1;

        if (trace->depth < FTRACE_RETFUNC_DEPTH) {
            if (cpu_data->enter_funcs[trace->depth] != trace->func)
                func_match = 0;
            cpu_data->enter_funcs[trace->depth] = 0;
        }
    }

    if (print_graph_prologue(iter, s, 0, 0, flags))
        return TRACE_TYPE_PARTIAL_LINE;

    /* Overhead */
    ret = print_graph_overhead(duration, s, flags);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    /* Duration */
    if (flags & TRACE_GRAPH_PRINT_DURATION) {
        ret = print_graph_duration(duration, s);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Closing brace */
    for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
        ret = trace_seq_printf(s, " ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /*
     * If the return function does not have a matching entry,
     * then the entry was lost. Instead of just printing
     * the '}' and letting the user guess what function this
     * belongs to, write out the function name.
     */
    if (func_match) {
        ret = trace_seq_printf(s, "}\n");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    } else {
        ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Overrun */
    if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
        ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                    trace->overrun);
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                  cpu, pid, flags);
    if (ret == TRACE_TYPE_PARTIAL_LINE)
        return TRACE_TYPE_PARTIAL_LINE;

    return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
            struct trace_iterator *iter, u32 flags)
{
    unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
    struct fgraph_data *data = iter->private;
    struct trace_event *event;
    int depth = 0;
    int ret;
    int i;

    if (data)
        depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

    if (print_graph_prologue(iter, s, 0, 0, flags))
        return TRACE_TYPE_PARTIAL_LINE;

    /* No overhead */
    ret = print_graph_overhead(-1, s, flags);
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    /* No time */
    if (flags & TRACE_GRAPH_PRINT_DURATION) {
        ret = trace_seq_printf(s, "            |  ");
        if (!ret)
            return TRACE_TYPE_PARTIAL_LINE;
    }

    /* Indentation */
    if (depth > 0)
        for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
            ret = trace_seq_printf(s, " ");
            if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        }

    /* The comment */
    ret = trace_seq_printf(s, "/* ");
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    switch (iter->ent->type) {
    case TRACE_BPRINT:
        ret = trace_print_bprintk_msg_only(iter);
        if (ret != TRACE_TYPE_HANDLED)
            return ret;
        break;
    case TRACE_PRINT:
        ret = trace_print_printk_msg_only(iter);
        if (ret != TRACE_TYPE_HANDLED)
            return ret;
        break;
    default:
        event = ftrace_find_event(ent->type);
        if (!event)
            return TRACE_TYPE_UNHANDLED;

        ret = event->funcs->trace(iter, sym_flags, event);
        if (ret != TRACE_TYPE_HANDLED)
            return ret;
    }

    /* Strip ending newline */
    if (s->buffer[s->len - 1] == '\n') {
        s->buffer[s->len - 1] = '\0';
        s->len--;
    }

    ret = trace_seq_printf(s, " */\n");
    if (!ret)
        return TRACE_TYPE_PARTIAL_LINE;

    return TRACE_TYPE_HANDLED;
}
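// trace_printk()/bprintk lines and other non-graph events that land between
// graph entries are rendered by print_graph_comment() above as C-style
// comment lines at the current depth, for example:
//
//  1)               |      /* foo: irq line asserted */
//
// (Illustrative output only; the message text depends on the event.)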
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
    struct ftrace_graph_ent_entry *field;
    struct fgraph_data *data = iter->private;
    struct trace_entry *entry = iter->ent;
    struct trace_seq *s = &iter->seq;
    int cpu = iter->cpu;
    int ret;

    if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
        per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
        return TRACE_TYPE_HANDLED;
    }

    /*
     * If the last output failed, there's a possibility we need
     * to print out the missing entry which would never go out.
     */
    if (data && data->failed) {
        field = &data->ent;
        iter->cpu = data->cpu;
        ret = print_graph_entry(field, s, iter, flags);
        if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
            per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
            ret = TRACE_TYPE_NO_CONSUME;
        }
        iter->cpu = cpu;
        return ret;
    }

    switch (entry->type) {
    case TRACE_GRAPH_ENT: {
        /*
         * print_graph_entry() may consume the current event,
         * thus @field may become invalid, so we need to save it.
         * sizeof(struct ftrace_graph_ent_entry) is very small,
         * it can be safely saved at the stack.
         */
        struct ftrace_graph_ent_entry saved;
        trace_assign_type(field, entry);
        saved = *field;
        return print_graph_entry(&saved, s, iter, flags);
    }
    case TRACE_GRAPH_RET: {
        struct ftrace_graph_ret_entry *field;
        trace_assign_type(field, entry);
        return print_graph_return(&field->ret, s, entry, iter, flags);
    }
    case TRACE_STACK:
    case TRACE_FN:
        /* dont trace stack and functions as comments */
        return TRACE_TYPE_UNHANDLED;

    default:
        return print_graph_comment(s, entry, iter, flags);
    }

    return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
    return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
               struct trace_event *event)
{
    return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
    static const char spaces[] = "                " /* 16 spaces */
        "    "                                      /* 4 spaces */
        "                 ";                        /* 17 spaces */
    int size = 0;

    if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
        size += 16;
    if (flags & TRACE_GRAPH_PRINT_CPU)
        size += 4;
    if (flags & TRACE_GRAPH_PRINT_PROC)
        size += 17;

    seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
    seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
    seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
    seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
    seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
    seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
    int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

    if (lat)
        print_lat_header(s, flags);

    /* 1st line */
    seq_printf(s, "#");
    if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
        seq_printf(s, "     TIME       ");
    if (flags & TRACE_GRAPH_PRINT_CPU)
        seq_printf(s, " CPU");
    if (flags & TRACE_GRAPH_PRINT_PROC)
        seq_printf(s, "  TASK/PID       ");
    if (lat)
        seq_printf(s, "|||||");
    if (flags & TRACE_GRAPH_PRINT_DURATION)
        seq_printf(s, "  DURATION   ");
    seq_printf(s, "               FUNCTION CALLS\n");

    /* 2nd line */
    seq_printf(s, "#");
    if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
        seq_printf(s, "      |         ");
    if (flags & TRACE_GRAPH_PRINT_CPU)
        seq_printf(s, " |  ");
    if (flags & TRACE_GRAPH_PRINT_PROC)
        seq_printf(s, "   |    |        ");
    if (lat)
        seq_printf(s, "|||||");
    if (flags & TRACE_GRAPH_PRINT_DURATION)
        seq_printf(s, "   |   |      ");
    seq_printf(s, "               |   |   |   |\n");
}
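/*
 * With the default flag set (CPU, overhead, duration), the two header
 * lines produced above come out roughly as:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */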
void print_graph_headers(struct seq_file *s)
{
    print_graph_headers_flags(s, tracer_flags.val);
}

void graph_trace_open(struct trace_iterator *iter)
{
    /* pid and depth on the last trace processed */
    struct fgraph_data *data;
    int cpu;

    iter->private = NULL;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        goto out_err;

    data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
    if (!data->cpu_data)
        goto out_err_free;

    for_each_possible_cpu(cpu) {
        pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
        int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
        int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
        int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        *pid = -1;
        *depth = 0;
        *ignore = 0;
        *depth_irq = -1;
    }

    iter->private = data;

    return;

out_err_free:
    kfree(data);
out_err:
    pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
    struct fgraph_data *data = iter->private;

    if (data) {
        free_percpu(data->cpu_data);
        kfree(data);
    }
}

static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
{
    if (bit == TRACE_GRAPH_PRINT_IRQS)
        ftrace_graph_skip_irqs = !set;

    return 0;
}

static struct trace_event_functions graph_functions = {
    .trace = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
    .type = TRACE_GRAPH_ENT,
    .funcs = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
    .type = TRACE_GRAPH_RET,
    .funcs = &graph_functions
};

static struct tracer graph_trace __read_mostly = {
    .name = "function_graph",
    .open = graph_trace_open,
    .pipe_open = graph_trace_open,
    .close = graph_trace_close,
    .pipe_close = graph_trace_close,
    .wait_pipe = poll_wait_pipe,
    .init = graph_trace_init,
    .reset = graph_trace_reset,
    .print_line = print_graph_function,
    .print_header = print_graph_headers,
    .flags = &tracer_flags,
    .set_flag = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
    .selftest = trace_selftest_startup_function_graph,
#endif
};
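/*
 * Selecting this tracer at run time (tracing debugfs, typically mounted at
 * /sys/kernel/debug):
 *
 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 */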
static __init int init_graph_trace(void)
{
    max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

    if (!register_ftrace_event(&graph_trace_entry_event)) {
        pr_warning("Warning: could not register graph trace events\n");
        return 1;
    }

    if (!register_ftrace_event(&graph_trace_ret_event)) {
        pr_warning("Warning: could not register graph trace events\n");
        return 1;
    }

    return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);