trace_functions_graph.c

// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};
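
/*
 * Usage note (a sketch, assuming tracefs is mounted at
 * /sys/kernel/tracing; older setups use /sys/kernel/debug/tracing):
 *
 *	# echo function_graph > /sys/kernel/tracing/current_tracer
 *	# echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *
 * The second write toggles one of the per-tracer options declared in
 * trace_opts above; prefixing the option name with "no" clears it.
 */
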
static struct trace_array *graph_array;

/*
 * DURATION column is being also used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index to ftrace return stack of
	 * current task. Its value should be in [0, FTRACE_RETFUNC_
	 * DEPTH) when the function graph tracer is used. To support
	 * filtering out specific functions, it makes the index
	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
	 * so when it sees a negative index the ftrace will ignore
	 * the record. And the index gets recovered when returning
	 * from the filtered function by adding the FTRACE_NOTRACE_
	 * DEPTH and then it'll continue to record functions normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr() which can be
	 * set from set_graph_notrace file in tracefs by user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}
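
/*
 * Illustration of the set_graph_notrace bookkeeping above: the entry
 * is always written at the pre-offset index, so for a filtered
 * function pushed at, say, index 3, curr_ret_stack becomes
 * 3 - FTRACE_NOTRACE_DEPTH (a large negative value) while
 * ret_stack[3] still holds the real return address. The pop path
 * adds FTRACE_NOTRACE_DEPTH back to recover index 3.
 */
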
int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func,
				     frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}
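
/*
 * function_graph_enter() is the common entry point called from the
 * architecture's prepare_ftrace_return() trampoline, which hands us
 * the function address, the saved return address and (where the arch
 * supports it) the frame pointer and the location of the return
 * address slot on the stack.
 */
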
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover index to get an original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
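
/*
 * ftrace_return_to_handler() is invoked from the arch's
 * return_to_handler trampoline, which was patched in as the return
 * address at function entry; the value returned here is the address
 * the trampoline finally jumps back to.
 */
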
/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
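
/*
 * A minimal unwinder-side sketch (hypothetical caller), showing the
 * calling convention documented above:
 *
 *	int graph_idx = 0;
 *	...
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr, addr_p);
 *
 * where addr_p points at the stack slot that held the return address,
 * and graph_idx carries state across successive frames.
 */
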
int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
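
/*
 * Note: the per-cpu "disabled" counter above acts as a recursion
 * guard; the entry event is written only when this CPU has no other
 * active user (atomic_inc_return() == 1). trace_graph_return() below
 * uses the same scheme.
 */
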
static void
__trace_graph_function(struct trace_array *tr,
		       unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		     unsigned long ip, unsigned long parent_ip,
		     unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned long flags,
			  int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
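
/*
 * With tracing_thresh set, trace_graph_entry() suppresses entry
 * events (it returns 1 without recording), and the handler above
 * drops returns shorter than the threshold, so the resulting trace
 * contains one return line per sufficiently slow function.
 */
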
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
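
/*
 * Example: pid 1755 with comm "sshd" renders as "  sshd-1755   ",
 * i.e. the comm (clipped to 7 characters) and pid pair centered in
 * the 14-character TRACE_GRAPH_PROCINFO_LENGTH column.
 */
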
static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:
	 * ------------------------------------------
	 * | 1)  migration/0--1  =>  sshd-1755
	 * ------------------------------------------
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		    struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
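
/*
 * A "leaf" is an entry event immediately followed by the matching
 * return event for the same pid and function. The printer collapses
 * the pair into a single line, e.g. "1.030 us | getname();" instead
 * of an open "getname() {" and a separate closing "}".
 */
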
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
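
/*
 * Example: a duration of 1234 ns arrives here as 1234; do_div()
 * splits it into 1 us and 234 ns, and the column prints "1.234 us"
 * padded to a fixed width of 8 characters plus the unit.
 */
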
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
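
/*
 * The overhead mark comes from trace_find_mark() in trace_output.c,
 * which flags slow functions (e.g. '+' for over 10 us and '!' for
 * over 100 us); without funcgraph-overhead the column stays blank.
 */
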
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		       struct ftrace_graph_ent_entry *entry,
		       struct ftrace_graph_ret_entry *ret_entry,
		       struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;
	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
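
/*
 * Usage sketch for the file registered below (path assumes tracefs
 * mounted at /sys/kernel/tracing):
 *
 *	# echo 3 > /sys/kernel/tracing/max_graph_depth
 *
 * limits traced call chains to three levels; writing 0 removes the
 * limit again.
 */
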
static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);