trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq and others to fill in space in the
 * DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * curr_ret_stack is an index into the current task's ftrace
	 * return stack. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering
	 * out specific functions, the index is made negative by
	 * subtracting a huge value (FTRACE_NOTRACE_DEPTH), so that when
	 * ftrace sees a negative index it will ignore the record. The
	 * index is recovered when returning from the filtered function
	 * by adding FTRACE_NOTRACE_DEPTH back, after which functions are
	 * recorded normally again.
	 *
	 * curr_ret_stack is initialized to -1 and gets increased in
	 * this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in debugfs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

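/*
 * Illustrative aside (not part of the kernel source): a minimal userspace
 * sketch of the negative-index trick described above, using made-up values
 * for FTRACE_RETFUNC_DEPTH and FTRACE_NOTRACE_DEPTH. A filtered push leaves
 * curr_ret_stack hugely negative so that later pushes bail out, and the
 * matching pop adds the offset back to recover the real index.
 */
#if 0
#include <assert.h>
#include <stdio.h>

#define DEMO_RETFUNC_DEPTH	50	/* hypothetical stack size */
#define DEMO_NOTRACE_DEPTH	10000	/* hypothetical huge offset */

int main(void)
{
	int curr = -1;			/* curr_ret_stack starts at -1 */

	/* Normal push: index becomes 0, counter stays non-negative. */
	int index = ++curr;
	assert(index == 0 && curr >= 0);

	/* Push of a notrace'd function: bias the counter negative. */
	index = ++curr;			/* real slot is 1 */
	curr -= DEMO_NOTRACE_DEPTH;	/* counter is now hugely negative */
	assert(curr < -1);		/* further pushes would return -EBUSY */

	/* Matching pop: undo the bias to recover the real index. */
	curr += DEMO_NOTRACE_DEPTH;
	assert(curr == index && curr < DEMO_RETFUNC_DEPTH);
	printf("recovered index %d\n", curr);
	return 0;
}
#endif
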
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;

	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function, or is itself enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of the ret stack negative to indicate that it
	 * should ignore further functions. But it needs its own ret
	 * stack entry to recover the original index in order to
	 * continue tracing after returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

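/*
 * Illustrative aside (not part of the kernel source): trace_graph_entry()
 * above uses the per-cpu "disabled" counter as a reentrancy guard -- only
 * the first increment on a CPU (atomic_inc_return() == 1) may write to the
 * buffer. A minimal userspace sketch of the same pattern with C11 atomics,
 * with a hypothetical record() callback standing in for the buffer write:
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

static atomic_long disabled;		/* stand-in for data->disabled */

static void record(const char *what)	/* hypothetical event writer */
{
	printf("recorded: %s\n", what);
}

static void guarded_record(const char *what)
{
	/* Only the outermost caller on this CPU records the event. */
	if (atomic_fetch_add(&disabled, 1) + 1 == 1)
		record(what);
	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	guarded_record("outer");	/* counter 0 -> 1: records */
	return 0;
}
#endif
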
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		       unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		     unsigned long ip, unsigned long parent_ip,
		     unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned long flags,
			  int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return TRACE_TYPE_HANDLED;
}

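/*
 * Illustrative aside (not part of the kernel source): a userspace sketch of
 * the centering math above -- pad the 14-column "comm-pid" field with
 * spaces/2 blanks on the left and the remainder on the right, so odd
 * padding leans right. Names here are made up for the demo.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define DEMO_FIELD_LEN 14		/* TRACE_GRAPH_PROCINFO_LENGTH */

static void print_centered(const char *comm, int pid)
{
	char body[32];
	int len, spaces, i;

	/* comm is truncated to 7 chars, mirroring comm[7] = '\0' above. */
	len = snprintf(body, sizeof(body), "%.7s-%d", comm, pid);
	spaces = len < DEMO_FIELD_LEN ? DEMO_FIELD_LEN - len : 0;

	for (i = 0; i < spaces / 2; i++)
		putchar(' ');
	fputs(body, stdout);
	for (i = 0; i < spaces - (spaces / 2); i++)
		putchar(' ');
	putchar('|');			/* column marker, to show the width */
	putchar('\n');
}

int main(void)
{
	print_centered("sshd", 1755);	/* "  sshd-1755   |" */
	print_centered("migration", 1);	/* comm truncated to "migrati" */
	return 0;
}
#endif
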
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;

	/*
	 * Context-switch trace line:
	 *
	 * ------------------------------------------
	 * | 1)  migration/0--1  =>  sshd-1755
	 * ------------------------------------------
	 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		    struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

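/*
 * Illustrative aside (not part of the kernel source): the leaf test above
 * boils down to "the very next buffer event is the matching return of the
 * same function on the same pid". A minimal sketch of that predicate over
 * simplified, made-up event records:
 */
#if 0
#include <stdio.h>

enum demo_type { DEMO_ENT, DEMO_RET };	/* TRACE_GRAPH_ENT / _RET */

struct demo_event {
	enum demo_type type;
	int pid;
	unsigned long func;
};

/* An entry is a leaf iff its immediate successor is its own return. */
static int is_leaf(const struct demo_event *curr,
		   const struct demo_event *next)
{
	return next->type == DEMO_RET &&
	       curr->pid == next->pid &&
	       curr->func == next->func;
}

int main(void)
{
	struct demo_event enter = { DEMO_ENT, 42, 0xc0ffee };
	struct demo_event ret   = { DEMO_RET, 42, 0xc0ffee };
	struct demo_event nest  = { DEMO_ENT, 42, 0xbeef };

	printf("leaf? %d\n", is_leaf(&enter, &ret));	/* 1: printed as f(); */
	printf("leaf? %d\n", is_leaf(&enter, &nest));	/* 0: printed as f() { */
	return 0;
}
#endif
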
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
				(unsigned long)t, usecs_rem);
}

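/*
 * Illustrative aside (not part of the kernel source): do_div() divides the
 * u64 in place and hands back the remainder, so the timestamp splits into
 * whole seconds plus a nanosecond remainder that is scaled to microseconds.
 * The same arithmetic in plain userspace C:
 */
#if 0
#include <inttypes.h>
#include <stdio.h>

#define DEMO_NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t t = 5123456789ULL;		/* a raw ns timestamp */
	unsigned long usecs_rem;

	usecs_rem = t % DEMO_NSEC_PER_SEC;	/* do_div() remainder */
	t /= DEMO_NSEC_PER_SEC;			/* do_div() quotient */
	usecs_rem /= 1000;			/* ns -> us */

	/* prints "    5.123456 |" just like the ABS_TIME column */
	printf("%5lu.%06lu |\n", (unsigned long)t, usecs_rem);
	return 0;
}
#endif
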
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

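/*
 * Illustrative aside (not part of the kernel source): the function above
 * prints a ns duration as microseconds with a fractional part, capped at
 * seven significant characters. A userspace sketch of the same formatting
 * (min_t() replaced by a plain comparison):
 */
#if 0
#include <stdio.h>
#include <string.h>

static void demo_duration(unsigned long long duration_ns)
{
	unsigned long nsecs_rem = duration_ns % 1000;	/* do_div() remainder */
	char msecs_str[21];
	char nsecs_str[5];
	size_t len, slen;

	duration_ns /= 1000;				/* now in us */
	sprintf(msecs_str, "%lu", (unsigned long)duration_ns);
	len = strlen(msecs_str);
	printf("%s", msecs_str);

	/* Append the fraction only while the total stays under 7 digits. */
	if (len < 7) {
		slen = sizeof(nsecs_str) < 8 - len ? sizeof(nsecs_str) : 8 - len;
		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		printf(".%s", nsecs_str);
	}
	printf(" us\n");
}

int main(void)
{
	demo_duration(123456789ULL);	/* "123456.7 us" -- fraction trimmed */
	demo_duration(1500ULL);		/* "1.500 us" */
	return 0;
}
#endif
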
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of the execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catching here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		       struct ftrace_graph_ent_entry *entry,
		       struct ftrace_graph_ret_entry *ret_entry,
		       struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

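/*
 * Illustrative aside (not part of the kernel source): depth_irq is a tiny
 * per-cpu state machine -- -1 means "not in irq code", otherwise it holds
 * the depth at which the irq was entered, and every event is swallowed
 * until a return at (or above) that depth. A userspace sketch of the same
 * bookkeeping, with hypothetical helper names:
 */
#if 0
#include <stdio.h>

static int depth_irq = -1;	/* stand-in for the per-cpu depth_irq */

static int skip_entry(int is_irq_func, int depth)
{
	if (depth_irq >= 0)
		return 1;	/* already inside irq code: skip */
	if (!is_irq_func)
		return 0;	/* normal function: print it */
	depth_irq = depth;	/* entering irq code: remember the depth */
	return 1;
}

static int skip_return(int depth)
{
	if (depth_irq == -1)
		return 0;	/* not in irq code: print it */
	if (depth_irq >= depth)
		depth_irq = -1;	/* unwound past the entry depth: leave */
	return 1;		/* still (or just) inside irq code: skip */
}

int main(void)
{
	printf("%d\n", skip_entry(0, 0));	/* 0: normal call shown */
	printf("%d\n", skip_entry(1, 1));	/* 1: irq entry hidden */
	printf("%d\n", skip_entry(0, 2));	/* 1: nested in irq, hidden */
	printf("%d\n", skip_return(2));		/* 1: nested return hidden */
	printf("%d\n", skip_return(1));		/* 1: irq exit, state reset */
	printf("%d\n", skip_return(0));		/* 0: back to normal output */
	return 0;
}
#endif
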
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * a note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
				       trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

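/*
 * Illustrative aside (not part of the kernel source): with debugfs mounted
 * in the conventional place, the max_graph_depth file created above can be
 * driven from userspace to cap the trace depth. A minimal sketch; the path
 * below assumes the usual mount point, adjust if yours differs.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define DEPTH_FILE "/sys/kernel/debug/tracing/max_graph_depth"

int main(void)
{
	char buf[16];
	int fd = open(DEPTH_FILE, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Limit the function graph to three levels of nesting. */
	write(fd, "3\n", 2);

	/* Read it back; graph_depth_read() formats "<depth>\n". */
	lseek(fd, 0, SEEK_SET);
	ssize_t n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("max_graph_depth = %s", buf);
	}
	close(fd);
	return 0;
}
#endif
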
static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);