trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

        tr->ops = ops;
        ops->private = tr;
        return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;

        /* The top level array uses the "global_ops". */
        if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
                ret = allocate_ftrace_ops(tr);
                if (ret)
                        return ret;
        }

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        kfree(tr->ops);
        tr->ops = NULL;
}
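
/*
 * Note: the two helpers above appear to be the hooks for trace instance
 * lifetime. Creating an instance (mkdir under tracing/instances/) allocates
 * a private ftrace_ops and creates the per-instance set_ftrace_filter /
 * set_ftrace_notrace files; removing the instance tears them down again.
 */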

static int function_trace_init(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
                /* There's only one global tr */
                if (!trace_ops.private) {
                        trace_ops.private = tr;
                        trace_stack_ops.private = tr;
                }

                if (func_flags.val & TRACE_FUNC_OPT_STACK)
                        ops = &trace_stack_ops;
                else
                        ops = &trace_ops;
                tr->ops = ops;
        } else if (!tr->ops) {
                /*
                 * Instance trace_arrays get their ops allocated
                 * at instance creation, unless that allocation
                 * failed.
                 */
                return -ENOMEM;
        }

        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);
 out:
        preempt_enable_notrace();
}
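
/*
 * Note on the recursion guard above: trace_test_and_set_recursion()
 * claims a per-context recursion bit and returns a negative value if
 * that bit is already held, i.e. if this callback re-entered itself on
 * the same CPU in the same context (for example because a function used
 * while recording the event is itself being traced). In that case the
 * event is simply dropped rather than recursing.
 */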

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}
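
/*
 * Note: tr->function_enabled is cleared before register_ftrace_function()
 * and only set once registration has completed, so the callbacks above
 * bail out on their function_enabled check for any hits that come in
 * while the ops are still being installed.
 */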

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                unregister_ftrace_function(tr->ops);

                if (set) {
                        tr->ops = &trace_stack_ops;
                        register_ftrace_function(tr->ops);
                } else {
                        tr->ops = &trace_ops;
                        register_ftrace_function(tr->ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}
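
/*
 * Usage sketch (paths assume debugfs mounted at /sys/kernel/debug): the
 * stack trace option above is flipped from user space via the tracer's
 * option file, e.g.:
 *
 *   # cd /sys/kernel/debug/tracing
 *   # echo function > current_tracer
 *   # echo 1 > options/func_stack_trace
 *
 * which reaches func_set_flag() with bit == TRACE_FUNC_OPT_STACK and
 * set == 1 and swaps tr->ops from trace_ops to trace_stack_ops.
 */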

static struct tracer function_trace __tracer_data =
{
        .name = "function",
        .init = function_trace_init,
        .reset = function_trace_reset,
        .start = function_trace_start,
        .wait_pipe = poll_wait_pipe,
        .flags = &func_flags,
        .set_flag = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_function,
#endif
};
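
/*
 * Usage sketch: the tracer registered above is selected through tracefs,
 * e.g.:
 *
 *   # echo function > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 *
 * With .allow_instances set, it can also be enabled inside a separate
 * trace instance with its own ring buffer:
 *
 *   # mkdir /sys/kernel/debug/tracing/instances/foo
 *   # echo function > /sys/kernel/debug/tracing/instances/foo/current_tracer
 */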

#ifdef CONFIG_DYNAMIC_FTRACE
static int update_count(void **data)
{
        unsigned long *count = (unsigned long *)data;

        if (!*count)
                return 0;

        if (*count != -1)
                (*count)--;

        return 1;
}
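
/*
 * Note on update_count() above: the per-probe data pointer doubles as the
 * counter. (void *)-1 (all bits set) means "unlimited", 0 means the count
 * is exhausted, and any other value is decremented on each hit. A return
 * value of 1 tells the caller the probe should still fire.
 */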

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        if (update_count(data))
                tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
        trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                trace_dump_stack(STACK_SKIP);
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                      struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                  struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func = ftrace_traceon_count,
        .print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func = ftrace_traceoff_count,
        .print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func = ftrace_stacktrace_count,
        .print = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func = ftrace_dump_probe,
        .print = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func = ftrace_cpudump_probe,
        .print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func = ftrace_traceon,
        .print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func = ftrace_traceoff,
        .print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func = ftrace_stacktrace,
        .print = ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                return 0;
        }

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}
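
/*
 * Usage sketch: this callback parses commands written to set_ftrace_filter
 * in the form "<glob>:<cmd>[:<count>]", e.g.:
 *
 *   # echo 'schedule:traceoff' >> set_ftrace_filter
 *   # echo 'do_fork:traceon:3' >> set_ftrace_filter
 *   # echo '!do_fork:traceon' >> set_ftrace_filter    (remove the probe)
 *
 * When no count is given the probe is registered as unlimited
 * ((void *)-1).
 */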

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
                     char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
                        char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}
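
/*
 * Usage sketch: the remaining commands follow the same syntax, e.g.:
 *
 *   # echo 'kfree:stacktrace:5' >> set_ftrace_filter
 *   # echo 'sys_sync:dump' >> set_ftrace_filter
 *
 * dump and cpudump pass a fixed count of "1" above, so the buffers are
 * dumped only on the first hit.
 */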

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name = "traceon",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name = "traceoff",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name = "stacktrace",
        .func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name = "dump",
        .func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name = "cpudump",
        .func = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}

#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
core_initcall(init_function_trace);