ftrace.c

/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>

#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif
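
/*
 * Note (added context, not from the original file): these "NOP"s are
 * not architectural no-ops. Assuming the EABI __gnu_mcount_nc calling
 * convention, the compiler emits "push {lr}; bl __gnu_mcount_nc" at
 * each call-site, so patching the branch into a pop of lr leaves the
 * push/pop pair with no net effect.
 */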

#ifdef CONFIG_DYNAMIC_FTRACE

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	set_kernel_text_rw();
	ftrace_modify_all_code(*command);
	set_kernel_text_ro();

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR	((unsigned long) ftrace_caller_old)

#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	if (!rec->arch.old_mcount)
		return addr;

	if (addr == MCOUNT_ADDR)
		addr = OLD_MCOUNT_ADDR;
	else if (addr == FTRACE_ADDR)
		addr = OLD_FTRACE_ADDR;

	return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}
#endif
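
/*
 * Background (added summary, not from the original file): kernels built
 * with old GCC toolchains (before __gnu_mcount_nc, introduced around
 * GCC 4.4) use the original "mcount" ABI. Those call-sites are not
 * preceded by a dedicated push of lr, so they are NOPed with a plain
 * "mov r0, r0", and adjust_address() steers each such site to the
 * matching old-ABI trampoline.
 */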

int ftrace_arch_code_modify_prepare(void)
{
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}
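
/*
 * Added sketch (illustrative, not from the original file): for an ARM
 * (non-Thumb) kernel whose target lies within the +/-32 MB BL range,
 * arm_gen_branch_link() boils down to encoding a BL instruction; note
 * that the ARM PC reads 8 bytes ahead of the executing instruction:
 *
 *	long offset = (long)addr - (long)(pc + 8);
 *	u32 bl = 0xeb000000 | ((offset >> 2) & 0x00ffffff);
 *
 * The real helper (arch/arm/kernel/insn.c) also handles Thumb2 and
 * out-of-range targets.
 */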

static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	/* Convert both opcodes to their in-memory representation. */
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		/* Refuse to patch unless the site holds the expected opcode. */
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	/* Keep the I-cache coherent with the just-written instruction. */
	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret) {
		pc = (unsigned long)&ftrace_call_old;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}
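
/*
 * Added context (not from the original file): ftrace_call is a patch
 * slot inside the assembly ftrace_caller trampoline, redirected here to
 * whichever tracer callback is currently active. Validation is skipped
 * (validate == false) because the slot's contents legitimately vary as
 * tracers are switched.
 */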

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);
	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(ip, old, new, true);
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

#ifdef CONFIG_OLD_MCOUNT
	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
		/* Retry on the assumption this site uses the old mcount ABI. */
		rec->arch.old_mcount = true;

		old = ftrace_call_replace(ip, adjust_address(rec, addr));
		new = ftrace_nop_replace(rec);
		ret = ftrace_modify_code(ip, old, new, true);
	}
#endif

	return ret;
}
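
/*
 * Added context (not from the original file): at boot the ftrace core
 * calls ftrace_make_nop() on every recorded call-site, which is where
 * rec->arch.old_mcount gets latched per site; enabling a tracer later
 * turns the chosen sites back into calls via ftrace_make_call().
 */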

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* Hijack the saved return address so the exit can be traced too. */
	old = *parent;
	*parent = return_hooker;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, NULL);
	if (err == -EBUSY) {
		/* Return stack full; restore the original return address. */
		*parent = old;
		return;
	}
}
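
/*
 * Added context (not from the original file): when the traced function
 * returns, it lands in the assembly return_to_handler stub, which calls
 * the core ftrace_return_to_handler() to pop the entry pushed above and
 * recover the original return address.
 */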

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);

static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}
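
/*
 * Added context (not from the original file): ftrace_graph_call is
 * another patch slot in the ftrace_caller trampoline. It holds the
 * "mov r0, r0" NOP while the graph tracer is off and is patched into a
 * plain branch (no link, via arm_gen_branch()) to ftrace_graph_caller
 * when it is enabled.
 */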

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
					     ftrace_graph_caller_old,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */