trace_functions_graph.c

// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;
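
/* Per-CPU state used while formatting the graph output */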
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val  = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
		TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
		TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * DURATION column is being also used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info.*/
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index to ftrace return stack of
	 * current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering
	 * out specific functions, it makes the index negative by
	 * subtracting a huge value (FTRACE_NOTRACE_DEPTH) so when ftrace
	 * sees a negative index it will ignore the record. The index
	 * gets recovered when returning from the filtered function by
	 * adding FTRACE_NOTRACE_DEPTH back, and then it'll continue to
	 * record functions normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr() which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}
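
/*
 * Called from the arch's function entry hook (e.g. prepare_ftrace_return())
 * to record the call and push the return address onto the ret_stack.
 */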
int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	trace.func = func;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		return -EBUSY;

	return ftrace_push_return_trace(ret, func, &trace.depth,
					frame_pointer, retp);
}

/* Retrieve a function return address from the trace stack on thread info.*/
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover index to get an original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 built with optimize for size (-Os) makes
	 * the latest gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;

	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
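
/* Write a function-entry event into the ring buffer */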
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}
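
/* Entry callback registered with register_ftrace_graph() */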
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
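
/*
 * Emit a paired entry/return event with identical timestamps (zero
 * duration) so a single address shows up as one graph event.
 */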
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}
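
/* Return callback registered with register_ftrace_graph() */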
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
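
/*
 * Tracer init: pick the threshold-aware return handler when
 * tracing_thresh is set, then register the graph callbacks.
 */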
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:
	 * ------------------------------------------
	 * | 1)  migration/0--1  =>  sshd-1755
	 * ------------------------------------------
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}
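
/*
 * Peek at the next event to see whether the current entry is a leaf
 * (a call immediately followed by its own return), which can then be
 * printed on a single line as "func();".
 */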
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}
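
/* Draw "==========>" / "<==========" markers around hardirq entry/exit */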
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
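
/* Print a function-entry event, either as a one-line leaf or an open brace */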
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
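
/* Print a function-return event as a closing brace plus its duration */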
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}
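
/* Render non-graph events (e.g. trace_printk()) as C-style comments */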
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                " /* 16 spaces */
		"    " /* 4 spaces */
		"                 "; /* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}
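
/* Allocate and initialize the per-CPU output state (struct fgraph_data) */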
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
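
/* "max_graph_depth" tracefs file: limit traced call depth (0 = no limit) */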
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);