trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

#define TRACE_GRAPH_INDENT	2

/* Spaces between function call and time duration */
#define TRACE_GRAPH_TIMESPACE_ENTRY	" "
/* Spaces between function call and closing braces */
#define TRACE_GRAPH_TIMESPACE_RET	" "

#define TRACE_GRAPH_PRINT_OVERRUN	0x1

static struct tracer_opt trace_opts[] = {
        /* Display overruns or not */
        { TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        .val = 0, /* Don't display overruns by default */
        .opts = trace_opts
};
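
/*
 * Usage sketch (not part of the original file): the "overrun" option
 * registered above should be togglable at runtime through the generic
 * trace_options interface, assuming debugfs is mounted under
 * /sys/kernel/debug:
 *
 *	echo overrun   > /sys/kernel/debug/tracing/trace_options
 *	echo nooverrun > /sys/kernel/debug/tracing/trace_options
 */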
/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };

static int graph_trace_init(struct trace_array *tr)
{
        int cpu, ret;

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);

        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

/* If the pid changed since the last trace, output this event */
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
        char *comm;

        if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
                return 1;

        last_pid[cpu] = pid;
        comm = trace_find_cmdline(pid);

        return trace_seq_printf(s, "\nCPU[%03d] "
                                   " ------------8<---------- thread %s-%d"
                                   " ------------8<----------\n\n",
                                   cpu, comm, pid);
}
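
/*
 * Illustrative only (assumed rendering, derived from the format string
 * above; the thread name and pid are hypothetical): when the traced pid
 * changes on a CPU, verif_pid() emits a cut line such as
 *
 *	CPU[000]  ------------8<---------- thread bash-2794 ------------8<----------
 *
 * The thread name is resolved through the cmdline recording started in
 * graph_trace_init().
 */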
/*
 * A "leaf" is an entry event immediately followed in the ring buffer by
 * the matching return event for the same pid and function: the call had
 * no traced children and can be printed on a single line.
 */
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
                     struct ftrace_graph_ent_entry *curr)
{
        struct ring_buffer_iter *ring_iter;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        ring_iter = iter->buffer_iter[iter->cpu];
        if (!ring_iter)
                return false;

        event = ring_buffer_iter_peek(iter->buffer_iter[iter->cpu], NULL);
        if (!event)
                return false;

        next = ring_buffer_event_data(event);
        if (next->ent.type != TRACE_GRAPH_RET)
                return false;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return false;

        return true;
}

static inline int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);

        return trace_seq_printf(s, "+ %llu.%lu us\n", duration, nsecs_rem);
}

/* Signal an overhead of execution time to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
        /* Duration exceeded 100 usecs */
        if (duration > 100000ULL)
                return trace_seq_printf(s, "! ");

        /* Duration exceeded 10 usecs */
        if (duration > 10000ULL)
                return trace_seq_printf(s, "+ ");

        return trace_seq_printf(s, "  ");
}
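
/*
 * Illustrative only (assumed rendering): print_graph_overhead() emits a
 * two-character marker column and print_graph_duration() the time in
 * microseconds, so a call lasting 12345 ns contributes
 *
 *	+ 	...	+ 12.345 us
 *
 * where "!" flags durations above 100 us and "+" those above 10 us.
 */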
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
        struct ftrace_graph_ret_entry *ret_entry;
        struct ftrace_graph_ret *graph_ret;
        struct ring_buffer_event *event;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int i;
        int ret;

        /* Consume the matching return event right away */
        event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
        ret_entry = ring_buffer_event_data(event);
        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = seq_print_ip_sym(s, call->func, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, "();");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_ENTRY);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_duration(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s)
{
        int i;
        int ret;
        struct ftrace_graph_ent *call = &entry->graph_ent;

        /* No overhead */
        ret = trace_seq_printf(s, "  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = seq_print_ip_sym(s, call->func, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, "() {");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No duration to print at this stage */
        ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_ENTRY "-\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
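
/*
 * Illustrative only (assumed rendering; the function names are
 * hypothetical): a leaf call is folded onto one line with its duration,
 * while a nested call opens a brace that print_graph_return() will
 * close later:
 *
 *	CPU[000] +   do_IRQ(); + 15.059 us
 *	CPU[000]     schedule() { -
 */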
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, int cpu)
{
        int ret;
        struct trace_entry *ent = iter->ent;

        if (!verif_pid(s, ent->pid, cpu))
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (trace_branch_is_leaf(iter, field))
                return print_graph_entry_leaf(iter, field, s);
        else
                return print_graph_entry_nested(field, s);
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, int cpu)
{
        int i;
        int ret;
        unsigned long long duration = trace->rettime - trace->calltime;

        /* Pid */
        if (!verif_pid(s, ent->pid, cpu))
                return TRACE_TYPE_PARTIAL_LINE;

        /* Cpu */
        ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "} ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_RET);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_duration(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overrun */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                       trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return TRACE_TYPE_HANDLED;
}
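
/*
 * Illustrative only (assumed rendering): the closing brace is printed
 * at the depth of the matching entry, followed by the total duration of
 * the call, e.g.
 *
 *	CPU[000] +   } + 22.823 us
 *
 * With the overrun option set, the count of nested calls that were
 * missed (trace->overrun) is appended as "(Overruns: N)".
 */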
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                struct ftrace_graph_ent_entry *field;
                trace_assign_type(field, entry);
                return print_graph_entry(field, s, iter,
                                         iter->cpu);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter->cpu);
        }
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

static struct tracer graph_trace __read_mostly = {
        .name       = "function_graph",
        .init       = graph_trace_init,
        .reset      = graph_trace_reset,
        .print_line = print_graph_function,
        .flags      = &tracer_flags,
};

static __init int init_graph_trace(void)
{
        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
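
/*
 * Usage sketch (not part of the original file): once registered, the
 * tracer should be selectable through the usual ftrace debugfs files,
 * assuming debugfs is mounted under /sys/kernel/debug:
 *
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 */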