/*
 * arch/arm/kernel/entry-ftrace.S
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#include "entry-header.S"
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Newer GCCs (4.4+) solve this problem by using a version of mcount with call
 * sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
 * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
 */
/*
 * mcount_adjust_addr rd, rn
 *
 * \rn holds a return address inside the instrumented function (normally
 * lr at the mcount call site).  Recover the address of the instrumented
 * function itself: clear a possible Thumb bit and step back over the
 * mcount call instruction.
 */
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm
/*
 * __mcount suffix
 *
 * Static (non-dynamic) ftrace dispatch.  If ftrace_trace_function points
 * at something other than ftrace_stub, call it at label 1.  Otherwise,
 * with the graph tracer configured in, branch to ftrace_graph_caller
 * when either of its hooks is armed.  If nothing is armed, fall through
 * to mcount_exit as cheaply as possible.
 */
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2			@ a tracer other than the stub installed?
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2			@ graph return hook armed?
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2			@ graph entry hook armed?
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1:	mcount_get_lr	r1		@ lr of instrumented func
	mcount_adjust_addr	r0, lr	@ instrumented function
	badr	lr, 2f			@ return into the exit path below
	mov	pc, r2			@ indirect call to the tracer
2:	mcount_exit
.endm
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

/*
 * __ftrace_regs_caller
 *
 * Build a struct pt_regs image on the stack for a FTRACE_WITH_REGS
 * tracer.  Entered right after the call site's "push {lr}", so on entry
 * [sp] holds the caller's previous LR and lr holds the address inside
 * the instrumented function.
 */
.macro __ftrace_regs_caller

	sub	sp, sp, #8	@ space for PC and CPSR OLD_R0,
				@ OLD_R0 will overwrite previous LR

	add	ip, sp, #12	@ move in IP the value of SP as it was
				@ before the push {lr} of the mcount mechanism

	str	lr, [sp, #0]	@ store LR instead of PC

	ldr	lr, [sp, #8]	@ get previous LR

	str	r0, [sp, #8]	@ write r0 as OLD_R0 over previous LR

	stmdb	sp!, {ip, lr}
	stmdb	sp!, {r0-r11, lr}

	@ stack content at this point:
	@ 0  4          48   52       56            60   64    68       72
	@ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |

	mov	r3, sp				@ struct pt_regs*

	ldr	r2, =function_trace_op
	ldr	r2, [r2]			@ pointer to the current
						@ function tracing op

	ldr	r1, [sp, #S_LR]			@ lr of instrumented func

	ldr	lr, [sp, #S_PC]			@ get LR

	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_regs_call
ftrace_regs_call:
	bl	ftrace_stub			@ patched at runtime
						@ (see arch/arm/kernel/ftrace.c)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_regs_call
ftrace_graph_regs_call:
	mov	r0, r0				@ nop; patched to a branch to
						@ ftrace_graph_regs_caller
#endif

	@ pop saved regs
	ldmia	sp!, {r0-r12}		@ restore r0 through r12
	ldr	ip, [sp, #8]		@ restore PC
	ldr	lr, [sp, #4]		@ restore LR
	ldr	sp, [sp, #0]		@ restore SP
	mov	pc, ip			@ return
.endm

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/*
 * __ftrace_graph_regs_caller
 *
 * Graph-tracer tail of the regs caller: hand the parent return-address
 * slot to prepare_ftrace_return, then unwind the pt_regs frame built by
 * __ftrace_regs_caller exactly as its normal exit path does.
 */
.macro __ftrace_regs_graph_helper_unused
.endm
.macro __ftrace_graph_regs_caller

	sub	r0, fp, #4		@ lr of instrumented routine (parent)
					@ called from __ftrace_regs_caller

	ldr	r1, [sp, #S_PC]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1

	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return

	@ pop registers saved in ftrace_regs_caller
	ldmia	sp!, {r0-r12}		@ restore r0 through r12
	ldr	ip, [sp, #8]		@ restore PC
	ldr	lr, [sp, #4]		@ restore LR
	ldr	sp, [sp, #0]		@ restore SP
	mov	pc, ip			@ return

.endm
#endif
#endif
/*
 * __ftrace_caller suffix
 *
 * Dynamic-ftrace tracing entry.  The "bl ftrace_stub" at
 * ftrace_call\suffix (and the nop at ftrace_graph_call\suffix) are
 * rewritten at runtime to call the active tracer
 * (see arch/arm/kernel/ftrace.c).
 */
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	ldr	r2, =function_trace_op
	ldr	r2, [r2]			@ pointer to the current
						@ function tracing op
	mov	r3, #0				@ regs is NULL
#endif

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub			@ patched to the live tracer

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0				@ nop; patched to a branch to
						@ ftrace_graph_caller\suffix
#endif

	mcount_exit
.endm
/*
 * __ftrace_graph_caller
 *
 * Hand the address of the instrumented routine's saved return address
 * to prepare_ftrace_return, which may replace it so the graph tracer
 * regains control when the routine returns.
 */
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
/*
 * __gnu_mcount_nc
 */
/* Save the caller-saved registers and lr on top of the call site's pushed lr. */
.macro mcount_enter
/*
 * This pad compensates for the push {lr} at the call site. Note that we are
 * unable to unwind through a function which does not otherwise save its lr.
 */
 UNWIND(.pad #4)
	stmdb	sp!, {r0-r3, lr}
 UNWIND(.save {r0-r3, lr})
.endm
/*
 * Fetch the lr the call site pushed before "bl __gnu_mcount_nc"; it sits
 * just above the five registers saved by mcount_enter (5 * 4 = 20 bytes).
 */
.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm
/*
 * Restore r0-r3 and lr saved by mcount_enter, pop the call site's pushed
 * lr into ip (scratch per the ARM calling convention) and return via it.
 */
.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	ret	ip
.endm
ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ With dynamic ftrace, call sites initially land here and tracing is
	@ off: just undo the call site's "push {lr}" and return.  The call
	@ is patched to ftrace_caller when tracing is enabled.
	mov	ip, lr
	ldmia	sp!, {lr}
	ret	ip
#else
	__mcount
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
/* Target that patched mcount call sites branch to when tracing is enabled. */
ENTRY(ftrace_caller)
UNWIND(.fnstart)
	__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/* As ftrace_caller, but presents a full struct pt_regs to the tracer. */
ENTRY(ftrace_regs_caller)
UNWIND(.fnstart)
	__ftrace_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_regs_caller)
#endif
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Entry the function-graph tracer is dispatched through. */
ENTRY(ftrace_graph_caller)
UNWIND(.fnstart)
	__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/* Graph-tracer entry reached from ftrace_regs_caller's patched nop. */
ENTRY(ftrace_graph_regs_caller)
UNWIND(.fnstart)
	__ftrace_graph_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_regs_caller)
#endif
#endif
/* The mcount_* helper macros are only meaningful above this point. */
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Trampoline substituted by prepare_ftrace_return for the real return
 * address: ask ftrace_return_to_handler for the original address, then
 * return there with r0-r3 (the routine's return values) preserved.
 */
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	ret	lr
#endif
/* Default no-op tracer target; __mcount compares against .Lftrace_stub. */
ENTRY(ftrace_stub)
.Lftrace_stub:
	ret	lr
ENDPROC(ftrace_stub)