ftrace.c

/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call sites are patched with a NOP until tracing is enabled.
 * All code mutation routines here are called under stop_machine().
 */
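
/*
 * For reference: with -pg under the EABI, GCC emits at each function
 * entry roughly
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * and dynamic ftrace rewrites the "bl" word in place, toggling it
 * between a branch into the ftrace trampoline and the NOP defined
 * below.
 */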

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>

#include "insn.h"

#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif
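
/*
 * Note that this "NOP" is not a real no-op: the __gnu_mcount_nc call
 * sequence pushes lr before the bl (see above), so the disabled
 * replacement must still pop that word off the stack.
 */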

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR	((unsigned long) ftrace_caller_old)

#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
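
/*
 * The old mcount ABI does not push lr at the call site for the callee
 * to pop, so a plain "mov r0, r0" can stand in for a disabled call.
 */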

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	if (!rec->arch.old_mcount)
		return addr;

	if (addr == MCOUNT_ADDR)
		addr = OLD_MCOUNT_ADDR;
	else if (addr == FTRACE_ADDR)
		addr = OLD_FTRACE_ADDR;

	return addr;
}
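
/*
 * adjust_address() above redirects patch targets for old-ABI call
 * sites: they must branch to the old-ABI entry points (mcount and
 * ftrace_caller_old), whose register and stack expectations differ
 * from the EABI __gnu_mcount_nc ones.
 */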
#else

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}
#endif

int ftrace_arch_code_modify_prepare(void)
{
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	return 0;
}
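
/*
 * The two hooks above are invoked by the ftrace core before and after
 * each batch of code patches; module text may be mapped read-only, so
 * it is made writable for the duration of the update.
 */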

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}
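
/*
 * arm_gen_branch_link() (insn.c) encodes a branch-and-link from pc to
 * addr. As a rough sketch of the ARM-mode case, the BL encoding is the
 * 0xeb000000 opcode plus a signed 24-bit word offset relative to pc + 8:
 *
 *	offset = (long)addr - (long)(pc + 8);
 *	insn   = 0xeb000000 | ((offset >> 2) & 0x00ffffff);
 *
 * which limits the target to +/-32 MB of the call site; out-of-range
 * targets yield 0.
 */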

static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}
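
/*
 * ftrace_modify_code() is the single patching primitive: opcodes are
 * first converted to memory order (__opcode_to_mem_*() handles the
 * byte swapping for big-endian and Thumb-2 kernels), the current
 * instruction is optionally checked against the expected 'old' value,
 * the 'new' word is written, and the icache for the patched range is
 * flushed. The -EINVAL on a validation mismatch is relied upon by
 * ftrace_make_nop() below to detect old-mcount call sites.
 */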

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret) {
		pc = (unsigned long)&ftrace_call_old;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}
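
/*
 * Note that ftrace_update_ftrace_func() passes validate=false: the
 * ftrace_call site inside the trampoline may already branch to any
 * previously installed tracer, so there is no single 'old' instruction
 * to check against.
 */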

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);
	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

#ifdef CONFIG_OLD_MCOUNT
	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
		rec->arch.old_mcount = true;

		old = ftrace_call_replace(ip, adjust_address(rec, addr));
		new = ftrace_nop_replace(rec);
		ret = ftrace_modify_code(ip, old, new, true);
	}
#endif

	return ret;
}
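
/*
 * The -EINVAL retry above is how old-mcount call sites are discovered:
 * if validation against the EABI 'bl' fails on the initial conversion
 * to a NOP, the site is assumed to have been built against the old
 * mcount ABI, rec->arch.old_mcount is recorded, and the replacement is
 * recomputed with the old-ABI addresses and NOP.
 */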

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}
}
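
/*
 * prepare_ftrace_return() runs from the mcount trampoline with 'parent'
 * pointing at the traced function's saved return address. Replacing
 * that slot with return_to_handler diverts the function's return
 * through the graph tracer; the original address is pushed onto the
 * per-task return stack and restored if the entry cannot be recorded.
 */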

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);

static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}
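
/*
 * __ftrace_modify_caller() toggles a single well-known call site (the
 * ftrace_graph_call label in the trampoline assembly) between a plain
 * NOP and a branch to the graph caller, validating the current
 * instruction in both directions.
 */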

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
					     ftrace_graph_caller_old,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */