/* include/linux/ftrace.h (web-viewer artifacts — size banner and line-number gutter — removed) */
  1. /*
  2. * Ftrace header. For implementation details beyond the random comments
  3. * scattered below, see: Documentation/trace/ftrace-design.txt
  4. */
  5. #ifndef _LINUX_FTRACE_H
  6. #define _LINUX_FTRACE_H
  7. #include <linux/trace_clock.h>
  8. #include <linux/kallsyms.h>
  9. #include <linux/linkage.h>
  10. #include <linux/bitops.h>
  11. #include <linux/ktime.h>
  12. #include <linux/sched.h>
  13. #include <linux/types.h>
  14. #include <linux/init.h>
  15. #include <linux/fs.h>
  16. #include <asm/ftrace.h>
  17. /*
  18. * If the arch supports passing the variable contents of
  19. * function_trace_op as the third parameter back from the
  20. * mcount call, then the arch should define this as 1.
  21. */
  22. #ifndef ARCH_SUPPORTS_FTRACE_OPS
  23. #define ARCH_SUPPORTS_FTRACE_OPS 0
  24. #endif
  25. /*
  26. * If the arch's mcount caller does not support all of ftrace's
  27. * features, then it must call an indirect function that
  28. * does. Or at least does enough to prevent any unwelcomed side effects.
  29. */
  30. #if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
  31. !ARCH_SUPPORTS_FTRACE_OPS
  32. # define FTRACE_FORCE_LIST_FUNC 1
  33. #else
  34. # define FTRACE_FORCE_LIST_FUNC 0
  35. #endif
  36. struct module;
  37. struct ftrace_hash;
  38. #ifdef CONFIG_FUNCTION_TRACER
  39. extern int ftrace_enabled;
  40. extern int
  41. ftrace_enable_sysctl(struct ctl_table *table, int write,
  42. void __user *buffer, size_t *lenp,
  43. loff_t *ppos);
  44. struct ftrace_ops;
  45. typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  46. struct ftrace_ops *op);
  47. /*
  48. * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
  49. * set in the flags member.
  50. *
  51. * ENABLED - set/unset when ftrace_ops is registered/unregistered
  52. * GLOBAL - set manualy by ftrace_ops user to denote the ftrace_ops
  53. * is part of the global tracers sharing the same filter
  54. * via set_ftrace_* debugfs files.
  55. * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  56. * allocated ftrace_ops which need special care
  57. * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
  58. * could be controled by following calls:
  59. * ftrace_function_local_enable
  60. * ftrace_function_local_disable
  61. */
  62. enum {
  63. FTRACE_OPS_FL_ENABLED = 1 << 0,
  64. FTRACE_OPS_FL_GLOBAL = 1 << 1,
  65. FTRACE_OPS_FL_DYNAMIC = 1 << 2,
  66. FTRACE_OPS_FL_CONTROL = 1 << 3,
  67. };
  68. struct ftrace_ops {
  69. ftrace_func_t func;
  70. struct ftrace_ops *next;
  71. unsigned long flags;
  72. int __percpu *disabled;
  73. #ifdef CONFIG_DYNAMIC_FTRACE
  74. struct ftrace_hash *notrace_hash;
  75. struct ftrace_hash *filter_hash;
  76. #endif
  77. };
  78. extern int function_trace_stop;
  79. /*
  80. * Type of the current tracing.
  81. */
  82. enum ftrace_tracing_type_t {
  83. FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
  84. FTRACE_TYPE_RETURN, /* Hook the return of the function */
  85. };
  86. /* Current tracing type, default is FTRACE_TYPE_ENTER */
  87. extern enum ftrace_tracing_type_t ftrace_tracing_type;
  88. /**
  89. * ftrace_stop - stop function tracer.
  90. *
  91. * A quick way to stop the function tracer. Note this an on off switch,
  92. * it is not something that is recursive like preempt_disable.
  93. * This does not disable the calling of mcount, it only stops the
  94. * calling of functions from mcount.
  95. */
  96. static inline void ftrace_stop(void)
  97. {
  98. function_trace_stop = 1;
  99. }
  100. /**
  101. * ftrace_start - start the function tracer.
  102. *
  103. * This function is the inverse of ftrace_stop. This does not enable
  104. * the function tracing if the function tracer is disabled. This only
  105. * sets the function tracer flag to continue calling the functions
  106. * from mcount.
  107. */
  108. static inline void ftrace_start(void)
  109. {
  110. function_trace_stop = 0;
  111. }
  112. /*
  113. * The ftrace_ops must be a static and should also
  114. * be read_mostly. These functions do modify read_mostly variables
  115. * so use them sparely. Never free an ftrace_op or modify the
  116. * next pointer after it has been registered. Even after unregistering
  117. * it, the next pointer may still be used internally.
  118. */
  119. int register_ftrace_function(struct ftrace_ops *ops);
  120. int unregister_ftrace_function(struct ftrace_ops *ops);
  121. void clear_ftrace_function(void);
  122. /**
  123. * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
  124. *
  125. * This function enables tracing on current cpu by decreasing
  126. * the per cpu control variable.
  127. * It must be called with preemption disabled and only on ftrace_ops
  128. * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
  129. * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  130. */
  131. static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
  132. {
  133. if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
  134. return;
  135. (*this_cpu_ptr(ops->disabled))--;
  136. }
  137. /**
  138. * ftrace_function_local_disable - enable controlled ftrace_ops on current cpu
  139. *
  140. * This function enables tracing on current cpu by decreasing
  141. * the per cpu control variable.
  142. * It must be called with preemption disabled and only on ftrace_ops
  143. * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
  144. * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  145. */
  146. static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
  147. {
  148. if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
  149. return;
  150. (*this_cpu_ptr(ops->disabled))++;
  151. }
  152. /**
  153. * ftrace_function_local_disabled - returns ftrace_ops disabled value
  154. * on current cpu
  155. *
  156. * This function returns value of ftrace_ops::disabled on current cpu.
  157. * It must be called with preemption disabled and only on ftrace_ops
  158. * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
  159. * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  160. */
  161. static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
  162. {
  163. WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
  164. return *this_cpu_ptr(ops->disabled);
  165. }
  166. extern void ftrace_stub(unsigned long a0, unsigned long a1, struct ftrace_ops *op);
  167. #else /* !CONFIG_FUNCTION_TRACER */
  168. /*
  169. * (un)register_ftrace_function must be a macro since the ops parameter
  170. * must not be evaluated.
  171. */
  172. #define register_ftrace_function(ops) ({ 0; })
  173. #define unregister_ftrace_function(ops) ({ 0; })
  174. static inline void clear_ftrace_function(void) { }
  175. static inline void ftrace_kill(void) { }
  176. static inline void ftrace_stop(void) { }
  177. static inline void ftrace_start(void) { }
  178. #endif /* CONFIG_FUNCTION_TRACER */
  179. #ifdef CONFIG_STACK_TRACER
  180. extern int stack_tracer_enabled;
  181. int
  182. stack_trace_sysctl(struct ctl_table *table, int write,
  183. void __user *buffer, size_t *lenp,
  184. loff_t *ppos);
  185. #endif
  186. struct ftrace_func_command {
  187. struct list_head list;
  188. char *name;
  189. int (*func)(struct ftrace_hash *hash,
  190. char *func, char *cmd,
  191. char *params, int enable);
  192. };
  193. #ifdef CONFIG_DYNAMIC_FTRACE
  194. int ftrace_arch_code_modify_prepare(void);
  195. int ftrace_arch_code_modify_post_process(void);
  196. void ftrace_bug(int err, unsigned long ip);
  197. struct seq_file;
  198. struct ftrace_probe_ops {
  199. void (*func)(unsigned long ip,
  200. unsigned long parent_ip,
  201. void **data);
  202. int (*callback)(unsigned long ip, void **data);
  203. void (*free)(void **data);
  204. int (*print)(struct seq_file *m,
  205. unsigned long ip,
  206. struct ftrace_probe_ops *ops,
  207. void *data);
  208. };
  209. extern int
  210. register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  211. void *data);
  212. extern void
  213. unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  214. void *data);
  215. extern void
  216. unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
  217. extern void unregister_ftrace_function_probe_all(char *glob);
  218. extern int ftrace_text_reserved(void *start, void *end);
  219. enum {
  220. FTRACE_FL_ENABLED = (1 << 30),
  221. };
  222. #define FTRACE_FL_MASK (0x3UL << 30)
  223. #define FTRACE_REF_MAX ((1 << 30) - 1)
  224. struct dyn_ftrace {
  225. union {
  226. unsigned long ip; /* address of mcount call-site */
  227. struct dyn_ftrace *freelist;
  228. };
  229. unsigned long flags;
  230. struct dyn_arch_ftrace arch;
  231. };
  232. int ftrace_force_update(void);
  233. int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
  234. int len, int reset);
  235. int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
  236. int len, int reset);
  237. void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
  238. void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
  239. void ftrace_free_filter(struct ftrace_ops *ops);
  240. int register_ftrace_command(struct ftrace_func_command *cmd);
  241. int unregister_ftrace_command(struct ftrace_func_command *cmd);
  242. enum {
  243. FTRACE_UPDATE_CALLS = (1 << 0),
  244. FTRACE_DISABLE_CALLS = (1 << 1),
  245. FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
  246. FTRACE_START_FUNC_RET = (1 << 3),
  247. FTRACE_STOP_FUNC_RET = (1 << 4),
  248. };
  249. enum {
  250. FTRACE_UPDATE_IGNORE,
  251. FTRACE_UPDATE_MAKE_CALL,
  252. FTRACE_UPDATE_MAKE_NOP,
  253. };
  254. enum {
  255. FTRACE_ITER_FILTER = (1 << 0),
  256. FTRACE_ITER_NOTRACE = (1 << 1),
  257. FTRACE_ITER_PRINTALL = (1 << 2),
  258. FTRACE_ITER_DO_HASH = (1 << 3),
  259. FTRACE_ITER_HASH = (1 << 4),
  260. FTRACE_ITER_ENABLED = (1 << 5),
  261. };
  262. void arch_ftrace_update_code(int command);
  263. struct ftrace_rec_iter;
  264. struct ftrace_rec_iter *ftrace_rec_iter_start(void);
  265. struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
  266. struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
  267. #define for_ftrace_rec_iter(iter) \
  268. for (iter = ftrace_rec_iter_start(); \
  269. iter; \
  270. iter = ftrace_rec_iter_next(iter))
  271. int ftrace_update_record(struct dyn_ftrace *rec, int enable);
  272. int ftrace_test_record(struct dyn_ftrace *rec, int enable);
  273. void ftrace_run_stop_machine(int command);
  274. unsigned long ftrace_location(unsigned long ip);
  275. extern ftrace_func_t ftrace_trace_function;
  276. int ftrace_regex_open(struct ftrace_ops *ops, int flag,
  277. struct inode *inode, struct file *file);
  278. ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
  279. size_t cnt, loff_t *ppos);
  280. ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
  281. size_t cnt, loff_t *ppos);
  282. loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin);
  283. int ftrace_regex_release(struct inode *inode, struct file *file);
  284. void __init
  285. ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
  286. /* defined in arch */
  287. extern int ftrace_ip_converted(unsigned long ip);
  288. extern int ftrace_dyn_arch_init(void *data);
  289. extern void ftrace_replace_code(int enable);
  290. extern int ftrace_update_ftrace_func(ftrace_func_t func);
  291. extern void ftrace_caller(void);
  292. extern void ftrace_call(void);
  293. extern void mcount_call(void);
  294. void ftrace_modify_all_code(int command);
  295. #ifndef FTRACE_ADDR
  296. #define FTRACE_ADDR ((unsigned long)ftrace_caller)
  297. #endif
  298. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  299. extern void ftrace_graph_caller(void);
  300. extern int ftrace_enable_ftrace_graph_caller(void);
  301. extern int ftrace_disable_ftrace_graph_caller(void);
  302. #else
  303. static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
  304. static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
  305. #endif
  306. /**
  307. * ftrace_make_nop - convert code into nop
  308. * @mod: module structure if called by module load initialization
  309. * @rec: the mcount call site record
  310. * @addr: the address that the call site should be calling
  311. *
  312. * This is a very sensitive operation and great care needs
  313. * to be taken by the arch. The operation should carefully
  314. * read the location, check to see if what is read is indeed
  315. * what we expect it to be, and then on success of the compare,
  316. * it should write to the location.
  317. *
  318. * The code segment at @rec->ip should be a caller to @addr
  319. *
  320. * Return must be:
  321. * 0 on success
  322. * -EFAULT on error reading the location
  323. * -EINVAL on a failed compare of the contents
  324. * -EPERM on error writing to the location
  325. * Any other value will be considered a failure.
  326. */
  327. extern int ftrace_make_nop(struct module *mod,
  328. struct dyn_ftrace *rec, unsigned long addr);
  329. /**
  330. * ftrace_make_call - convert a nop call site into a call to addr
  331. * @rec: the mcount call site record
  332. * @addr: the address that the call site should call
  333. *
  334. * This is a very sensitive operation and great care needs
  335. * to be taken by the arch. The operation should carefully
  336. * read the location, check to see if what is read is indeed
  337. * what we expect it to be, and then on success of the compare,
  338. * it should write to the location.
  339. *
  340. * The code segment at @rec->ip should be a nop
  341. *
  342. * Return must be:
  343. * 0 on success
  344. * -EFAULT on error reading the location
  345. * -EINVAL on a failed compare of the contents
  346. * -EPERM on error writing to the location
  347. * Any other value will be considered a failure.
  348. */
  349. extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
  350. /* May be defined in arch */
  351. extern int ftrace_arch_read_dyn_info(char *buf, int size);
  352. extern int skip_trace(unsigned long ip);
  353. extern void ftrace_disable_daemon(void);
  354. extern void ftrace_enable_daemon(void);
  355. #else
  356. static inline int skip_trace(unsigned long ip) { return 0; }
  357. static inline int ftrace_force_update(void) { return 0; }
  358. static inline void ftrace_disable_daemon(void) { }
  359. static inline void ftrace_enable_daemon(void) { }
  360. static inline void ftrace_release_mod(struct module *mod) {}
  361. static inline int register_ftrace_command(struct ftrace_func_command *cmd)
  362. {
  363. return -EINVAL;
  364. }
  365. static inline int unregister_ftrace_command(char *cmd_name)
  366. {
  367. return -EINVAL;
  368. }
  369. static inline int ftrace_text_reserved(void *start, void *end)
  370. {
  371. return 0;
  372. }
  373. /*
  374. * Again users of functions that have ftrace_ops may not
  375. * have them defined when ftrace is not enabled, but these
  376. * functions may still be called. Use a macro instead of inline.
  377. */
  378. #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
  379. #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
  380. #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
  381. #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
  382. #define ftrace_free_filter(ops) do { } while (0)
  383. static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
  384. size_t cnt, loff_t *ppos) { return -ENODEV; }
  385. static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
  386. size_t cnt, loff_t *ppos) { return -ENODEV; }
  387. static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
  388. {
  389. return -ENODEV;
  390. }
  391. static inline int
  392. ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
  393. #endif /* CONFIG_DYNAMIC_FTRACE */
  394. /* totally disable ftrace - can not re-enable after this */
  395. void ftrace_kill(void);
  396. static inline void tracer_disable(void)
  397. {
  398. #ifdef CONFIG_FUNCTION_TRACER
  399. ftrace_enabled = 0;
  400. #endif
  401. }
  402. /*
  403. * Ftrace disable/restore without lock. Some synchronization mechanism
  404. * must be used to prevent ftrace_enabled to be changed between
  405. * disable/restore.
  406. */
  407. static inline int __ftrace_enabled_save(void)
  408. {
  409. #ifdef CONFIG_FUNCTION_TRACER
  410. int saved_ftrace_enabled = ftrace_enabled;
  411. ftrace_enabled = 0;
  412. return saved_ftrace_enabled;
  413. #else
  414. return 0;
  415. #endif
  416. }
  417. static inline void __ftrace_enabled_restore(int enabled)
  418. {
  419. #ifdef CONFIG_FUNCTION_TRACER
  420. ftrace_enabled = enabled;
  421. #endif
  422. }
  423. #ifndef HAVE_ARCH_CALLER_ADDR
  424. # ifdef CONFIG_FRAME_POINTER
  425. # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
  426. # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
  427. # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
  428. # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
  429. # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
  430. # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
  431. # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
  432. # else
  433. # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
  434. # define CALLER_ADDR1 0UL
  435. # define CALLER_ADDR2 0UL
  436. # define CALLER_ADDR3 0UL
  437. # define CALLER_ADDR4 0UL
  438. # define CALLER_ADDR5 0UL
  439. # define CALLER_ADDR6 0UL
  440. # endif
  441. #endif /* ifndef HAVE_ARCH_CALLER_ADDR */
  442. #ifdef CONFIG_IRQSOFF_TRACER
  443. extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  444. extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
  445. #else
  446. static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  447. static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
  448. #endif
  449. #ifdef CONFIG_PREEMPT_TRACER
  450. extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  451. extern void trace_preempt_off(unsigned long a0, unsigned long a1);
  452. #else
  453. /*
  454. * Use defines instead of static inlines because some arches will make code out
  455. * of the CALLER_ADDR, when we really want these to be a real nop.
  456. */
  457. # define trace_preempt_on(a0, a1) do { } while (0)
  458. # define trace_preempt_off(a0, a1) do { } while (0)
  459. #endif
  460. #ifdef CONFIG_FTRACE_MCOUNT_RECORD
  461. extern void ftrace_init(void);
  462. #else
  463. static inline void ftrace_init(void) { }
  464. #endif
  465. /*
  466. * Structure that defines an entry function trace.
  467. */
  468. struct ftrace_graph_ent {
  469. unsigned long func; /* Current function */
  470. int depth;
  471. };
  472. /*
  473. * Structure that defines a return function trace.
  474. */
  475. struct ftrace_graph_ret {
  476. unsigned long func; /* Current function */
  477. unsigned long long calltime;
  478. unsigned long long rettime;
  479. /* Number of functions that overran the depth limit for current task */
  480. unsigned long overrun;
  481. int depth;
  482. };
  483. /* Type of the callback handlers for tracing function graph*/
  484. typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
  485. typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
  486. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  487. /* for init task */
  488. #define INIT_FTRACE_GRAPH .ret_stack = NULL,
  489. /*
  490. * Stack of return addresses for functions
  491. * of a thread.
  492. * Used in struct thread_info
  493. */
  494. struct ftrace_ret_stack {
  495. unsigned long ret;
  496. unsigned long func;
  497. unsigned long long calltime;
  498. unsigned long long subtime;
  499. unsigned long fp;
  500. };
  501. /*
  502. * Primary handler of a function return.
  503. * It relays on ftrace_return_to_handler.
  504. * Defined in entry_32/64.S
  505. */
  506. extern void return_to_handler(void);
  507. extern int
  508. ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
  509. unsigned long frame_pointer);
  510. /*
  511. * Sometimes we don't want to trace a function with the function
  512. * graph tracer but we want them to keep traced by the usual function
  513. * tracer if the function graph tracer is not configured.
  514. */
  515. #define __notrace_funcgraph notrace
  516. /*
  517. * We want to which function is an entrypoint of a hardirq.
  518. * That will help us to put a signal on output.
  519. */
  520. #define __irq_entry __attribute__((__section__(".irqentry.text")))
  521. /* Limits of hardirq entrypoints */
  522. extern char __irqentry_text_start[];
  523. extern char __irqentry_text_end[];
  524. #define FTRACE_RETFUNC_DEPTH 50
  525. #define FTRACE_RETSTACK_ALLOC_SIZE 32
  526. extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
  527. trace_func_graph_ent_t entryfunc);
  528. extern void ftrace_graph_stop(void);
  529. /* The current handlers in use */
  530. extern trace_func_graph_ret_t ftrace_graph_return;
  531. extern trace_func_graph_ent_t ftrace_graph_entry;
  532. extern void unregister_ftrace_graph(void);
  533. extern void ftrace_graph_init_task(struct task_struct *t);
  534. extern void ftrace_graph_exit_task(struct task_struct *t);
  535. extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
  536. static inline int task_curr_ret_stack(struct task_struct *t)
  537. {
  538. return t->curr_ret_stack;
  539. }
  540. static inline void pause_graph_tracing(void)
  541. {
  542. atomic_inc(&current->tracing_graph_pause);
  543. }
  544. static inline void unpause_graph_tracing(void)
  545. {
  546. atomic_dec(&current->tracing_graph_pause);
  547. }
  548. #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
  549. #define __notrace_funcgraph
  550. #define __irq_entry
  551. #define INIT_FTRACE_GRAPH
  552. static inline void ftrace_graph_init_task(struct task_struct *t) { }
  553. static inline void ftrace_graph_exit_task(struct task_struct *t) { }
  554. static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
  555. static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
  556. trace_func_graph_ent_t entryfunc)
  557. {
  558. return -1;
  559. }
  560. static inline void unregister_ftrace_graph(void) { }
  561. static inline int task_curr_ret_stack(struct task_struct *tsk)
  562. {
  563. return -1;
  564. }
  565. static inline void pause_graph_tracing(void) { }
  566. static inline void unpause_graph_tracing(void) { }
  567. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  568. #ifdef CONFIG_TRACING
  569. /* flags for current->trace */
  570. enum {
  571. TSK_TRACE_FL_TRACE_BIT = 0,
  572. TSK_TRACE_FL_GRAPH_BIT = 1,
  573. };
  574. enum {
  575. TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
  576. TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
  577. };
  578. static inline void set_tsk_trace_trace(struct task_struct *tsk)
  579. {
  580. set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
  581. }
  582. static inline void clear_tsk_trace_trace(struct task_struct *tsk)
  583. {
  584. clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
  585. }
  586. static inline int test_tsk_trace_trace(struct task_struct *tsk)
  587. {
  588. return tsk->trace & TSK_TRACE_FL_TRACE;
  589. }
  590. static inline void set_tsk_trace_graph(struct task_struct *tsk)
  591. {
  592. set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
  593. }
  594. static inline void clear_tsk_trace_graph(struct task_struct *tsk)
  595. {
  596. clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
  597. }
  598. static inline int test_tsk_trace_graph(struct task_struct *tsk)
  599. {
  600. return tsk->trace & TSK_TRACE_FL_GRAPH;
  601. }
  602. enum ftrace_dump_mode;
  603. extern enum ftrace_dump_mode ftrace_dump_on_oops;
  604. #ifdef CONFIG_PREEMPT
  605. #define INIT_TRACE_RECURSION .trace_recursion = 0,
  606. #endif
  607. #endif /* CONFIG_TRACING */
  608. #ifndef INIT_TRACE_RECURSION
  609. #define INIT_TRACE_RECURSION
  610. #endif
  611. #ifdef CONFIG_FTRACE_SYSCALLS
  612. unsigned long arch_syscall_addr(int nr);
  613. #endif /* CONFIG_FTRACE_SYSCALLS */
  614. #endif /* _LINUX_FTRACE_H */