trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also being used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, it makes the index negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH) so that when ftrace sees a
	 * negative index it will ignore the record. The index is recovered
	 * when returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, after which functions are recorded
	 * normally again.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr() which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that we have just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

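/*
 * Entry callback registered via register_ftrace_graph(). Applies the
 * depth, graph-filter, notrace, and irq checks, then records the entry
 * event, provided per-cpu tracing is not already disabled (nested).
 */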
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	/* trace it when it is nested in, or is, an enabled function */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

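/*
 * Emit a matched entry/return pair with zero duration for a single
 * address. This lets callers drop a one-off graph-style event into the
 * buffer through trace_graph_function() below.
 */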
static void
__trace_graph_function(struct trace_array *tr,
		       unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		     unsigned long ip, unsigned long parent_ip,
		     unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned long flags,
			  int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

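/*
 * Return handler used when tracing_thresh is set: only record
 * functions whose duration meets the threshold. Entry events are
 * suppressed in trace_graph_entry() for this mode.
 */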
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;

	trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

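/*
 * Look ahead one event to decide whether @curr is a leaf call: the
 * next event must be the matching TRACE_GRAPH_RET for the same pid
 * and function. On a match, advance the iterator past the return
 * event and hand it back so the pair can be printed as a single
 * "func();" line; otherwise return NULL.
 */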
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		    struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

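/*
 * If @addr falls inside the irq entry text, draw an interrupt marker
 * ("==========>" on entry, "<==========" on return) across the
 * DURATION column instead of a normal function line.
 */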
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

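/*
 * Format a duration given in nanoseconds as "<usecs>.<nsecs> us",
 * padded so the DURATION column stays aligned.
 */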
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of the execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		       struct ftrace_graph_ent_entry *entry,
		       struct ftrace_graph_ret_entry *ret_entry,
		       struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

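/*
 * Print everything that precedes the function column: the pid-change
 * banner, an irq marker if needed, and the optional absolute-time,
 * cpu, proc, and latency fields selected by @flags.
 */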
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * a note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, so we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

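/*
 * Main print_line entry point: replay a previously failed entry from
 * the saved copy if there is one, then dispatch the current event to
 * the entry, return, or comment printer.
 */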
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;
	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

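/* React to run-time toggling of tracer options through tracefs. */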
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

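/*
 * Handlers for the "max_graph_depth" tracefs file. A non-zero value
 * limits how deep the tracer follows call chains; zero (the default)
 * means no limit.
 */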
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);