trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
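
/*
 * Illustrative sketch (not part of the kernel tree) of how the pair
 * above is meant to be used by graph-tracing hot paths: check
 * ftrace_graph_is_dead() before touching any graph state, and call
 * ftrace_graph_stop() once corruption is detected so every other path
 * backs off. The hook and the corruption check below are hypothetical.
 */
#if 0
static void example_graph_hook(unsigned long self_addr)
{
	/* Bail out early once a fatal error has been flagged. */
	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (example_detect_corruption(self_addr)) {	/* hypothetical */
		/* Flag the fatal error and warn the user. */
		ftrace_graph_stop();
		WARN_ON(1);
	}
}
#endif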

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};
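
/*
 * The options above appear in the tracefs trace_options file as
 * funcgraph-* flags. A typical session, assuming tracefs is mounted at
 * /sys/kernel/tracing (adjust the path if it is mounted elsewhere):
 *
 *	# cd /sys/kernel/tracing
 *	# echo funcgraph-proc > trace_options		(enable an option)
 *	# echo nofuncgraph-irqs > trace_options		(disable an option)
 *	# cat trace_options				(list current state)
 */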

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, it makes the index negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative
	 * index it will ignore the record. The index gets recovered when
	 * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
	 * back, after which recording continues normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}
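
/*
 * Worked example of the negative-index scheme above, assuming
 * FTRACE_NOTRACE_DEPTH is 65536 (its value in ftrace.h at the time of
 * writing): a function filtered via set_graph_notrace that is pushed at
 * index 3 leaves curr_ret_stack at 3 - 65536 = -65533. Every path that
 * sees a negative index ignores the record, and the pop path adds
 * 65536 back, recovering index 3 so tracing continues normally.
 */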

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
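
/*
 * Sketch of the calling convention an unwinder is expected to follow:
 * one state variable, zero-initialized per unwind and passed back in on
 * every frame. The frame iteration below is hypothetical pseudocode;
 * real callers live in the arch unwinders.
 */
#if 0
static void example_unwind(struct task_struct *task)
{
	int graph_idx = 0;	/* must start at zero, see above */
	unsigned long addr;
	unsigned long *retp;

	while (example_next_frame(&addr, &retp)) {	/* hypothetical */
		/* Map return_to_handler back to the real return address. */
		addr = ftrace_graph_ret_addr(task, &graph_idx, addr, retp);
		printk("%pS\n", (void *)addr);
	}
}
#endif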

int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of the ret stack negative to indicate that it
	 * should ignore further functions. But it needs its own ret stack
	 * entry to recover the original index in order to continue tracing
	 * after returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		       unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		     unsigned long ip, unsigned long parent_ip,
		     unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned long flags,
			  int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}
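
/*
 * The tracing_thresh handling above is driven from user space: writing
 * a nonzero number of microseconds to the tracing_thresh file before
 * enabling the tracer makes graph_trace_init() register
 * trace_graph_thresh_return(), so only returns of functions that ran
 * longer than the threshold are recorded. For example (tracefs path
 * assumed to be /sys/kernel/tracing):
 *
 *	# echo 100 > /sys/kernel/tracing/tracing_thresh
 *	# echo function_graph > /sys/kernel/tracing/current_tracer
 */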

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		    struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
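
/*
 * The leaf detection above is what lets the output collapse an
 * entry/return pair with no children into a single line. Roughly
 * (illustrative output; exact spacing depends on the flags):
 *
 *	1)   0.123 us    |    leaf_func();
 *	1)               |    nested_func() {
 *	1)   0.456 us    |      child_func();
 *	1)   1.234 us    |    }
 */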

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
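
/*
 * Worked example of the formatting above: duration = 12345 ns gives
 * do_div(duration, 1000) -> duration = 12 us, nsecs_rem = 345, printed
 * as "12.345 us" and padded to the row width. Once the microsecond part
 * reaches 7 digits (e.g. 1234567890 ns -> 1234567 us), the fractional
 * part is dropped so the column never exceeds 7 digits.
 */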

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		       struct ftrace_graph_ent_entry *entry,
		       struct ftrace_graph_ret_entry *ret_entry,
		       struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}
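
/*
 * With the default flags the two header lines built above come out
 * roughly as (illustrative; exact spacing varies with the flags):
 *
 *	#  CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */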

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);
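
/*
 * The max_graph_depth file created above bounds how deep the tracer
 * follows call chains; 0 means no limit. A depth of 1, for instance,
 * shows only the first function entered from user space or an
 * interrupt. Example session (tracefs path assumed):
 *
 *	# echo function_graph > /sys/kernel/tracing/current_tracer
 *	# echo 1 > /sys/kernel/tracing/max_graph_depth
 *	# cat /sys/kernel/tracing/trace
 *	# echo 0 > /sys/kernel/tracing/max_graph_depth
 */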

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);