trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}
/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

static unsigned int max_depth;
static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};
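/*
 * Usage sketch (tracefs interface, not part of this file; the tracing
 * directory may also be mounted at /sys/kernel/debug/tracing): each option
 * above is toggled at runtime through the trace_options file, e.g.
 *
 *   echo funcgraph-proc   > /sys/kernel/tracing/trace_options
 *   echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 */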
static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);
/* Add a function return address to the trace stack on thread info.*/
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        /*
         * The curr_ret_stack is an index into the ftrace return stack of
         * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
         * when the function graph tracer is used. To support filtering out
         * specific functions, it makes the index negative by subtracting a
         * huge value (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a
         * negative index it ignores the record. The index gets recovered
         * when returning from the filtered function by adding
         * FTRACE_NOTRACE_DEPTH back, and then recording continues normally.
         *
         * The curr_ret_stack is initialized to -1 and gets increased
         * in this function. So it can be less than -1 only if it was
         * filtered out via ftrace_graph_notrace_addr(), which can be
         * set from the set_graph_notrace file in tracefs by the user.
         */
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = current->curr_ret_stack;

        return 0;
}
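/*
 * Worked example of the negative-index trick above (FTRACE_NOTRACE_DEPTH is
 * assumed to be 65536, as in this era's <linux/ftrace.h>): with
 * curr_ret_stack = 3, pushing a function matched by set_graph_notrace stores
 * its frame at index 4 but leaves curr_ret_stack at 4 - 65536, i.e. negative,
 * so nested entries are skipped. ftrace_return_to_handler() adds 65536 back
 * when the filtered function returns, and normal recording resumes.
 */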
/* Retrieve a function return address from the trace stack on thread info.*/
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function. Recover the index to get the original
         * return address. See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt were to come in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}
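/*
 * Context, for readers (arch-specific and not part of this file): on entry
 * to a traced function, the architecture's mcount/fentry hook replaces the
 * function's return address with a return_to_handler trampoline. That
 * trampoline calls ftrace_return_to_handler() above to emit the return
 * event and to fetch the real return address to jump back to (on x86, see
 * the return_to_handler assembly in the arch tree).
 */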
int __trace_graph_entry(struct trace_array *tr,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

        return 1;
}
static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        /* trace it when it is nested in an enabled function, or is one itself */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
             ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
            (max_depth && trace->depth >= max_depth))
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}
static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
        if (tracing_thresh)
                return 1;
        else
                return trace_graph_entry(trace);
}
static void
__trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long parent_ip,
                unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}
void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}
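/*
 * Usage note (illustrative; tracing_thresh is written in microseconds via
 * tracefs and is assumed to be stored internally in nanoseconds): with the
 * threshold variant installed, only returns slower than the threshold are
 * recorded, e.g.
 *
 *   echo 100            > /sys/kernel/tracing/tracing_thresh
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *
 * Entry events are suppressed entirely (trace_graph_thresh_entry() returns
 * 1 without recording), so the output is a flat list of slow functions.
 */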
static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_thresh_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}
static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}
#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}
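/*
 * Worked example of the centering above: comm "bash" (truncated to at most
 * 7 chars by the '\0' store), pid 1234 gives "bash-1234", len 9, so
 * spaces = 5: two are printed before and three after, yielding
 * "  bash-1234   " in the 14-character field.
 */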
static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}
/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}
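/*
 * Illustration of what the leaf detection buys in the output: when an entry
 * event is immediately followed by its own return event, the pair collapses
 * into a single line carrying the duration, e.g.
 *
 *   1.234 us |    kfree();
 *
 * instead of the nested form used when other events intervene:
 *
 *            |    kfree() {
 *   1.234 us |    }
 */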
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 numbers) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}
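/*
 * Worked example: duration = 12345 ns. do_div() leaves duration = 12 and
 * nsecs_rem = 345, so the function emits "12.345 us " padded out to the
 * column width; durations of seven or more digits of microseconds skip the
 * fractional part entirely.
 */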
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an overhead of time execution to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}
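/*
 * Note on the overhead marks: trace_find_mark() lives in trace_output.c,
 * and the thresholds here are assumptions recalled from that file rather
 * than guaranteed by this one. Roughly: '+' marks durations over 10 us,
 * '!' over 100 us, '#' over 1 ms and '$' over 1 s, producing lines such as
 * "! 250.147 us |  }".
 */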
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry,
                struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        return trace_handle_return(s);
}
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it. Because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved at the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}
static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                "      /* 16 spaces */
                "    "                                       /* 4 spaces */
                "                 ";                         /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}
static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}
static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}
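/*
 * Sample of what the two header lines render as with the default flags
 * (illustrative, spacing abbreviated):
 *
 *   # CPU  DURATION                  FUNCTION CALLS
 *   # |     |   |                     |   |   |   |
 */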
void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}
void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}
static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};
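/*
 * Usage sketch (tracefs interface, not part of this file; the mount point
 * may also be /sys/kernel/debug/tracing): the tracer registered above is
 * selected and read back with:
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/trace
 */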
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
        int n;

        n = sprintf(buf, "%d\n", max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};
static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);
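/*
 * Usage sketch for the file created above: limiting the trace depth cuts
 * overhead drastically on deep call chains. Since trace_graph_entry() only
 * checks "max_depth && trace->depth >= max_depth", a value of 0 (the
 * default) means no limit, e.g.:
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth
 */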
static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);