/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

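/*
 * Allocate a per-instance ftrace_ops and point it at the plain
 * function-trace callback. The ops and the trace_array reference each
 * other, so the callback can find its instance through op->private.
 */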
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

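/*
 * Called when a trace instance is created: the instance gets its own
 * ftrace_ops plus its own filter control files under @parent. The top
 * level array's files already exist from boot.
 */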
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

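/*
 * Set up the "function" tracer on a trace_array: pick the callback
 * (stack tracing is honored only on the global instance), then start
 * cmdline recording and the function callback itself.
 */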
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

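/*
 * Callback invoked on every traced function entry. Preemption is
 * disabled around the per-CPU buffer access, and the trace recursion
 * guard keeps a function traced from within this path from recursing
 * back into the buffer.
 */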
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

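/*
 * Like function_trace_call(), but also records a stack trace for each
 * entry. Recursion is fenced with the per-CPU "disabled" counter under
 * disabled interrupts instead of the recursion helper.
 */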
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

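/*
 * tr->function_enabled gates function_trace_call() itself; keep it
 * clear while the ops is being (un)registered so the callback does
 * not record during the transition.
 */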
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

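/*
 * Toggle the func_stack_trace option at runtime. The ops is taken out
 * of the ftrace list, its callback swapped, and put back, rather than
 * changing ->func while it is live.
 */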
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
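/*
 * The probes below implement the traceon, traceoff, stacktrace, dump
 * and cpudump commands of set_ftrace_filter. A probe may carry a
 * countdown in its data pointer: update_count() returns nonzero while
 * the probe should still fire, treating -1 as "unlimited".
 */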
static int update_count(void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	if (update_count(data))
		tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		trace_dump_stack(STACK_SKIP);
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}

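/*
 * Common seq_file output for the probes, shown when set_ftrace_filter
 * is read: "<func>:<cmd>:unlimited" or "<func>:<cmd>:count=<n>".
 */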
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func	= ftrace_traceon_count,
	.print	= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func	= ftrace_traceoff_count,
	.print	= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func	= ftrace_stacktrace_count,
	.print	= ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func	= ftrace_dump_probe,
	.print	= ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func	= ftrace_cpudump_probe,
	.print	= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func	= ftrace_traceon,
	.print	= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func	= ftrace_traceoff,
	.print	= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func	= ftrace_stacktrace,
	.print	= ftrace_stacktrace_print,
};

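/*
 * Common parsing for probe commands written to set_ftrace_filter as
 * "<glob>:<cmd>[:<count>]", e.g.:
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'vfs_read:stacktrace:5' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 *
 * A leading '!' removes an existing probe; the optional count limits
 * how many times it fires (the default of -1 means unlimited).
 */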
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name	= "traceon",
	.func	= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name	= "traceoff",
	.func	= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name	= "stacktrace",
	.func	= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name	= "dump",
	.func	= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name	= "cpudump",
	.func	= ftrace_cpudump_callback,
};

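/*
 * Register the five set_ftrace_filter commands, unwinding the ones
 * already registered if a later registration fails.
 */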
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}

#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);