trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
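
/*
 * This file implements the "function" tracer, which records every kernel
 * function entered while tracing is enabled, and (with dynamic ftrace) a
 * set of per-function trigger commands: traceon, traceoff, stacktrace,
 * dump and cpudump.
 */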
/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}
/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static struct tracer_flags func_flags;
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
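
/*
 * The stack-trace variant below records a stack trace along with each
 * function hit.  It cannot rely on the recursion bit used above, since
 * it runs before the generic recursion protection is in place, so it
 * disables interrupts and uses the per-cpu ->disabled counter to guard
 * against reentrancy instead.
 */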
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
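
/*
 * The func_stack_trace option can be flipped at run time from the
 * tracing debugfs directory (typically /sys/kernel/debug/tracing):
 *
 *	echo 1 > options/func_stack_trace
 */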
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}
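
/*
 * Called when the user toggles a tracer option while the tracer is
 * active.  For TRACE_FUNC_OPT_STACK this swaps which ftrace_ops is
 * registered, so the change takes effect immediately.
 */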
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
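
/*
 * The tracer is selected through the tracing debugfs directory, e.g.:
 *
 *	cd /sys/kernel/debug/tracing
 *	echo function > current_tracer
 *	cat trace
 */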
#ifdef CONFIG_DYNAMIC_FTRACE
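
/*
 * The probe's data pointer is used as a hit counter: 0 means the budget
 * is exhausted, -1 means unlimited, anything else is decremented once
 * per hit.  Returns nonzero when the probe's action should fire.
 */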
static int update_count(void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	if (update_count(data))
		tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}
/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		trace_dump_stack(STACK_SKIP);
}
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}
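
/*
 * Common seq_file output for all probe ->print() handlers.  A
 * registered probe prints as, e.g.:
 *
 *	schedule:traceoff:count=4
 *	wake_up_process:stacktrace:unlimited
 */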
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func	= ftrace_traceon_count,
	.print	= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func	= ftrace_traceoff_count,
	.print	= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func	= ftrace_stacktrace_count,
	.print	= ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func	= ftrace_dump_probe,
	.print	= ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func	= ftrace_cpudump_probe,
	.print	= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func	= ftrace_traceon,
	.print	= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func	= ftrace_traceoff,
	.print	= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func	= ftrace_stacktrace,
	.print	= ftrace_stacktrace_print,
};
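
/*
 * Common parsing for the probe commands written to set_ftrace_filter.
 * The accepted form is
 *
 *	<function glob>:<command>[:<count>]
 *
 * A leading '!' removes a previously installed probe.  When no count is
 * given, it defaults to -1 (unlimited) via the probe data pointer.
 */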
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
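
/*
 * traceon, traceoff and stacktrace choose a counting or an unlimited
 * probe flavor depending on whether the user supplied a count; dump and
 * cpudump always pass a count of "1" so they fire only once.
 */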
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name	= "traceon",
	.func	= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name	= "traceoff",
	.func	= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name	= "stacktrace",
	.func	= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name	= "dump",
	.func	= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name	= "cpudump",
	.func	= ftrace_cpudump_callback,
};
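
/*
 * Example invocations through set_ftrace_filter, e.g.:
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *	echo 'wake_up_process:traceon:5' > set_ftrace_filter
 *	echo '!schedule:traceoff' > set_ftrace_filter
 */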
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);