trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

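/*
 * Usage sketch (not part of the original file): each option above maps
 * to a name accepted by the trace_options file, assuming the usual
 * debugfs mount point:
 *
 *   # echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *   # echo nofuncgraph-overhead > /sys/kernel/debug/tracing/trace_options
 *
 * Writing the "no"-prefixed name clears the corresponding flag.
 */
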
static struct trace_array *graph_array;

/*
 * DURATION column is also being used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, it makes the index negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative
	 * index it will ignore the record. The index gets recovered when
	 * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
	 * back, and then tracing continues to record functions normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented in
	 * this function. So it can be less than -1 only if it was filtered
	 * out via ftrace_graph_notrace_addr(), which can be set from the
	 * set_graph_notrace file in debugfs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

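/*
 * Worked example of the notrace index trick above (illustrative only):
 * with curr_ret_stack == 2, entering a function listed in
 * set_graph_notrace stores its frame at index 3 as usual, but leaves
 * curr_ret_stack at 3 - FTRACE_NOTRACE_DEPTH, i.e. deeply negative.
 * Every nested entry then bails out at the "curr_ret_stack < -1" check,
 * so nothing below the filtered function is recorded. On return,
 * ftrace_pop_return_trace() adds FTRACE_NOTRACE_DEPTH back to find the
 * saved frame at index 3, and after the handler's decrement the index
 * is recovered to 2, resuming normal tracing.
 */
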
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover index to get an original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

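/*
 * Background sketch (illustrative, not from this file): the caller of
 * ftrace_return_to_handler() is the architecture's return trampoline,
 * e.g. return_to_handler on x86. The push path patched the saved return
 * address to point at that trampoline; the value returned here is the
 * real caller address the trampoline finally jumps back to.
 */
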
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* trace it when it is nested in, or is, an enabled function */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

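/*
 * Usage sketch (assuming debugfs at /sys/kernel/debug): graph_trace_init()
 * runs when the tracer is selected from userspace, e.g.:
 *
 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 *
 * Setting a nonzero tracing_thresh beforehand switches registration to
 * the *_thresh callbacks, which only record returns whose duration
 * exceeds the threshold.
 */
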
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return TRACE_TYPE_HANDLED;
}

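/*
 * Rendering example (illustrative): with funcgraph-proc enabled, the
 * field above centers "comm-pid" in a 14-character column, with comm
 * truncated to 7 characters, e.g. pid 2794 of bash renders as:
 *
 *   "  bash-2794   "
 */
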
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

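/*
 * Leaf merging, by example (illustrative): a call with no traced
 * children produces two adjacent events, ENT(func) then RET(func).
 * get_return_for_leaf() pairs them so the printer can emit the
 * compact single-line form
 *
 *   1)   0.633 us    |    kfree();
 *
 * instead of an open/close bracket pair around an empty body.
 */
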
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

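/*
 * Sample output (illustrative): when an interrupt fires inside a traced
 * function, the markers above render in the DURATION column as
 *
 *   1)               |      schedule() {
 *   1)   ==========> |
 *   1)               |        smp_apic_timer_interrupt() {
 *   ...
 *   1)   <========== |
 */
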
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print usecs (the quotient after dividing nsecs by 1000) */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to print more than 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

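/*
 * Formatting example (illustrative): durations arrive in nanoseconds;
 * do_div(duration, 1000) splits 1234567 ns into 1234 us + 567 ns, which
 * prints as "1234.567 us ". A 250 ns call prints as "0.250 us " plus
 * padding spaces, keeping the column a fixed width.
 */
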
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
			return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs (100000 ns) */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs (10000 ns) */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

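/*
 * Marker example (illustrative): with funcgraph-overhead enabled, slow
 * calls are flagged in the two columns ahead of the duration:
 *
 *   1) + 12.345 us   |  }     // exceeded 10 usecs
 *   1) ! 345.678 us  |  }     // exceeded 100 usecs
 */
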
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

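/*
 * The two entry styles, side by side (illustrative):
 *
 *   2)   1.015 us    |    do_page_fault();        <- leaf, one line
 *   2)               |    handle_mm_fault() {     <- nested, bracketed
 *   2)   0.402 us    |      __do_fault();
 *   2)   1.893 us    |    }
 */
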
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is a return entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

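/*
 * State-machine sketch (illustrative): depth_irq is -1 while outside
 * irq code. check_irq_entry() latches the depth at which irq code was
 * entered; every event at or below that depth is then suppressed until
 * check_irq_return() sees a return at (or above) the latched depth and
 * resets depth_irq to -1.
 */
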
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                " /* 16 spaces */
		"    "                                  /* 4 spaces */
		"                 ";                    /* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

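/*
 * Resulting header with the default flags (illustrative):
 *
 *   # CPU  DURATION                  FUNCTION CALLS
 *   # |     |   |                     |   |   |   |
 */
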
static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

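/*
 * Usage sketch (assuming debugfs at /sys/kernel/debug): the file
 * created above limits how deep the graph is recorded; 0 means no
 * limit (see the max_depth check in trace_graph_entry()):
 *
 *   # echo 2 > /sys/kernel/debug/tracing/max_graph_depth
 *   # echo 0 > /sys/kernel/debug/tracing/max_graph_depth   # unlimited
 */
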
static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);