trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

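/*
 * Allocate a private ftrace_ops for a trace_array instance. The ops
 * starts out with the non-stack callback, and the instance is stored
 * in ops->private so the callback can find its trace_array.
 */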
static int allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

        tr->ops = ops;
        ops->private = tr;
        return 0;
}

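/*
 * Called when an instance directory is created: sets up the
 * per-instance ftrace_ops and its filter control files
 * (set_ftrace_filter/set_ftrace_notrace).
 */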
int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;

        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ret = allocate_ftrace_ops(tr);
        if (ret)
                return ret;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        kfree(tr->ops);
        tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
        ftrace_func_t func;

        /*
         * Instance trace_arrays get their ops allocated at instance
         * creation, unless that allocation failed.
         */
        if (!tr->ops)
                return -ENOMEM;

        /* Currently only the global instance can do stack tracing */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
            func_flags.val & TRACE_FUNC_OPT_STACK)
                func = function_stack_trace_call;
        else
                func = function_trace_call;

        ftrace_init_array_ops(tr, func);

        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
        ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}

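/*
 * The main per-function callback. It runs with preemption disabled and
 * takes the recursion-protection bit so that functions called from
 * within the tracer itself do not get traced recursively.
 */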
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}

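/*
 * Same as function_trace_call(), but also records a stack trace for
 * each function. Here the per-CPU "disabled" counter, taken with
 * interrupts off, provides the recursion protection.
 */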
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

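/*
 * Changing the func_stack_trace option swaps the live callback: the
 * ops is unregistered first, ops->func is updated, and the ops is
 * then registered again with the new callback in place.
 */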
static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                /* We can change this flag when not running. */
                if (tr->current_trace != &function_trace)
                        break;

                unregister_ftrace_function(tr->ops);

                if (set) {
                        tr->ops->func = function_stack_trace_call;
                        register_ftrace_function(tr->ops);
                } else {
                        tr->ops->func = function_trace_call;
                        register_ftrace_function(tr->ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct tracer function_trace __tracer_data =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};

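/*
 * The probes below back the set_ftrace_filter commands, e.g.:
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'schedule:stacktrace:5' > set_ftrace_filter
 *
 * An optional trailing ":count" limits how many times the action
 * triggers; without it the action is unlimited.
 */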
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
                                 unsigned long ip,
                                 struct trace_array *tr, bool on,
                                 void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;

        /*
         * Tracing gets disabled (or enabled) once per count.
         * This function can be called at the same time on multiple CPUs.
         * It is fine if both disable (or enable) tracing, as disabling
         * (or enabling) the second time doesn't do anything as the
         * state of the tracer is already disabled (or enabled).
         * What needs to be synchronized in this case is that the count
         * only gets decremented once, even if the tracer is disabled
         * (or enabled) twice, as the second one is really a nop.
         *
         * The memory barriers guarantee that we only decrement the
         * counter once. First the count is read to a local variable
         * and a read barrier is used to make sure that it is loaded
         * before checking if the tracer is in the state we want.
         * If the tracer is not in the state we want, then the count
         * is guaranteed to be the old count.
         *
         * Next the tracer is set to the state we want (disabled or enabled)
         * then a write memory barrier is used to make sure that
         * the new state is visible before changing the counter by
         * one minus the old counter. This guarantees that another CPU
         * executing this code will see the new state before seeing
         * the new counter value, and would not do anything if the new
         * counter is seen.
         *
         * Note, there is no synchronization between this and a user
         * setting the tracing_on file. But we currently don't care
         * about that.
         */
        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
        old_count = *count;

        if (old_count <= 0)
                return;

        /* Make sure we see count before checking tracing state */
        smp_rmb();

        if (on == !!tracer_tracing_is_on(tr))
                return;

        if (on)
                tracer_tracing_on(tr);
        else
                tracer_tracing_off(tr);

        /* Make sure tracing state is visible before updating count */
        smp_wmb();

        *count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
                      struct trace_array *tr, struct ftrace_probe_ops *ops,
                      void *data)
{
        update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
               struct trace_array *tr, struct ftrace_probe_ops *ops,
               void *data)
{
        if (tracer_tracing_is_on(tr))
                return;

        tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
                struct trace_array *tr, struct ftrace_probe_ops *ops,
                void *data)
{
        if (!tracer_tracing_is_on(tr))
                return;

        tracer_tracing_off(tr);
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static __always_inline void trace_stack(struct trace_array *tr)
{
        unsigned long flags;
        int pc;

        local_save_flags(flags);
        pc = preempt_count();

        __trace_stack(tr, flags, STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
                        struct trace_array *tr, struct ftrace_probe_ops *ops,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;
        long new_count;

        if (!tracing_is_on())
                return;

        /* unlimited? */
        if (!mapper) {
                trace_stack(tr);
                return;
        }

        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        /*
         * Stack traces should only execute the number of times the
         * user specified in the counter.
         */
        do {
                old_count = *count;

                if (!old_count)
                        return;

                new_count = old_count - 1;
                new_count = cmpxchg(count, old_count, new_count);
                if (new_count == old_count)
                        trace_stack(tr);

                if (!tracing_is_on())
                        return;
        } while (new_count != old_count);
}

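/*
 * Decrement the counter attached to @ip, if any. Returns 1 when the
 * probe action should still fire (no counter, or counter not yet
 * exhausted) and 0 once the counter has reached zero.
 */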
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count) {
                if (*count <= 0)
                        return 0;
                (*count)--;
        }

        return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, struct ftrace_probe_ops *ops,
                   void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count)
                seq_printf(m, ":count=%ld\n", *count);
        else
                seq_puts(m, ":unlimited\n");

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops,
                     void *data)
{
        return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                      struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                  struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, ops, data);
}

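/*
 * Attach the counter passed via @init_data to the function address the
 * probe was set on, allocating the ip-to-data mapper on first use.
 */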
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *init_data, void **data)
{
        struct ftrace_func_mapper *mapper = *data;

        if (!mapper) {
                mapper = allocate_ftrace_func_mapper();
                if (!mapper)
                        return -ENOMEM;
                *data = mapper;
        }

        return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *data)
{
        struct ftrace_func_mapper *mapper = data;

        if (!ip) {
                free_ftrace_func_mapper(mapper, NULL);
                return;
        }

        ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func                   = ftrace_traceon_count,
        .print                  = ftrace_traceon_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func                   = ftrace_traceoff_count,
        .print                  = ftrace_traceoff_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func                   = ftrace_stacktrace_count,
        .print                  = ftrace_stacktrace_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func                   = ftrace_dump_probe,
        .print                  = ftrace_dump_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func                   = ftrace_cpudump_probe,
        .print                  = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func                   = ftrace_stacktrace,
        .print                  = ftrace_stacktrace_print,
};

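/*
 * Common registration path for all probe commands: a leading '!' in
 * the glob removes the probe, and an optional numeric parameter is
 * parsed into the count handed to the probe's init callback.
 */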
static int
ftrace_trace_probe_callback(struct trace_array *tr,
                            struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return unregister_ftrace_function_probe_func(glob+1, tr, ops);

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, tr, ops, count);

        return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                     char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                        char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name                   = "traceon",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name                   = "traceoff",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name                   = "stacktrace",
        .func                   = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name                   = "dump",
        .func                   = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name                   = "cpudump",
        .func                   = ftrace_cpudump_callback,
};

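/* Register the probe commands above, unwinding on any failure. */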
static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}