trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};
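
/*
 * Each trace_array instance other than the top-level (global) array gets
 * its own dynamically allocated ftrace_ops. allocate_ftrace_ops() points
 * the callback at function_trace_call() and cross-links tr->ops and
 * ops->private so the callback can find its trace_array again.
 */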
static int allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

        tr->ops = ops;
        ops->private = tr;
        return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;

        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ret = allocate_ftrace_ops(tr);
        if (ret)
                return ret;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        kfree(tr->ops);
        tr->ops = NULL;
}
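
/*
 * Tracer init callback: the global trace_array keeps using the static
 * trace_ops/trace_stack_ops (picking the stack variant if the
 * func_stack_trace option is set), while instances rely on the ops that
 * were allocated when the instance was created.
 */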
static int function_trace_init(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
                /* There's only one global tr */
                if (!trace_ops.private) {
                        trace_ops.private = tr;
                        trace_stack_ops.private = tr;
                }

                if (func_flags.val & TRACE_FUNC_OPT_STACK)
                        ops = &trace_stack_ops;
                else
                        ops = &trace_ops;
                tr->ops = ops;
        } else if (!tr->ops) {
                /*
                 * Instance trace_arrays get their ops allocated
                 * at instance creation, unless that allocation failed.
                 */
                return -ENOMEM;
        }

        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}
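
/*
 * Per-function callback for the plain "function" tracer. It records one
 * function entry event per hit: recursion is fended off with
 * trace_test_and_set_recursion(), preemption is disabled around the
 * per-cpu buffer access, and nothing is logged while data->disabled is set.
 */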
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}
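
/*
 * Variant used when the func_stack_trace option is set: it records the
 * function entry and then a stack trace. Interrupts are disabled and the
 * per-cpu "disabled" counter guards against nested invocations on the
 * same CPU.
 */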
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};
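
/*
 * The func_stack_trace option is toggled from user space while the function
 * tracer is active, e.g. (typical tracefs/debugfs usage):
 *
 *   echo function > current_tracer
 *   echo 1 > options/func_stack_trace
 *
 * which ends up in func_set_flag() below and swaps tr->ops between
 * trace_ops and trace_stack_ops.
 */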

static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                unregister_ftrace_function(tr->ops);

                if (set) {
                        tr->ops = &trace_stack_ops;
                        register_ftrace_function(tr->ops);
                } else {
                        tr->ops = &trace_ops;
                        register_ftrace_function(tr->ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}
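
/*
 * Definition of the "function" tracer itself. It is registered from
 * init_function_trace() at the bottom of this file and then appears in
 * available_tracers; it is selected the usual way:
 *
 *   echo function > current_tracer
 */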
static struct tracer function_trace __tracer_data =
{
        .name = "function",
        .init = function_trace_init,
        .reset = function_trace_reset,
        .start = function_trace_start,
        .wait_pipe = poll_wait_pipe,
        .flags = &func_flags,
        .set_flag = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
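
/*
 * Helper for the counted probe variants below: the probe's data pointer is
 * used as a counter. A count of 0 means the probe is spent, -1 means
 * "unlimited"; any other value is decremented once per hit.
 */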
static int update_count(void **data)
{
        unsigned long *count = (long *)data;

        if (!*count)
                return 0;

        if (*count != -1)
                (*count)--;

        return 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        if (update_count(data))
                tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
        trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                trace_dump_stack(STACK_SKIP);
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                      struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                  struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func = ftrace_traceon_count,
        .print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func = ftrace_traceoff_count,
        .print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func = ftrace_stacktrace_count,
        .print = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func = ftrace_dump_probe,
        .print = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func = ftrace_cpudump_probe,
        .print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func = ftrace_traceon,
        .print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func = ftrace_traceoff,
        .print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func = ftrace_stacktrace,
        .print = ftrace_stacktrace_print,
};
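
/*
 * Common parsing for the probe commands. The glob/cmd/param triplet comes
 * from a write to set_ftrace_filter of the form "<glob>:<cmd>[:<count>]";
 * a leading '!' removes a previously installed probe, and an optional count
 * limits how many times the probe fires (stored in the data pointer).
 */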
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                return 0;
        }

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
                     char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
                        char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}
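
/*
 * The commands registered below are used through set_ftrace_filter, e.g.
 * (typical usage, see Documentation/trace/ftrace.txt):
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'do_page_fault:stacktrace:5' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 */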

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name = "traceon",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name = "traceoff",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name = "stacktrace",
        .func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name = "dump",
        .func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name = "cpudump",
        .func = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}

#else

static inline int init_func_cmd_traceon(void)
{
        return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}

core_initcall(init_function_trace);