ftrace_64_mprofile.S

/*
 * Split from ftrace_64.S
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/magic.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
#include <asm/thread_info.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * ftrace_caller() is the function that replaces _mcount() when ftrace is
 * active.
 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter, r1 points to A's stack frame and B has not
 * yet had a chance to allocate one.
 *
 * Additionally r2 may point either to the TOC for A or for B, depending on
 * whether B did a TOC setup sequence before calling us.
 *
 * On entry the LR points back to the _mcount() call site, and r0 holds the
 * saved LR as it was on entry to B, ie. the original return address at the
 * call site in A.
 *
 * Our job is to save the register state into a struct pt_regs (on the stack)
 * and then arrange for the ftrace function to be called.
 */
_GLOBAL(ftrace_caller)
	/* Save the original return address in A's stack frame */
	std	r0, LRSAVE(r1)

	/* Create our stack frame + pt_regs */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)

	/* Save all gprs to pt_regs */
	SAVE_GPR(0, r1)
	SAVE_10GPRS(2, r1)
	SAVE_10GPRS(12, r1)
	SAVE_10GPRS(22, r1)

	/* Save previous stack pointer (r1) */
	addi	r8, r1, SWITCH_FRAME_SIZE
	std	r8, GPR1(r1)

	/* Load special regs for save below */
	mfmsr	r8
	mfctr	r9
	mfxer	r10
	mfcr	r11

	/* Get the _mcount() call site out of LR */
	mflr	r7
	/* Save it as pt_regs->nip */
	std	r7, _NIP(r1)
	/* Save the original LR (A's return address) in pt_regs->link */
	std	r0, _LINK(r1)

	/* Save callee's TOC in the ABI compliant location */
	std	r2, 24(r1)
	ld	r2, PACATOC(r13)	/* get kernel TOC in r2 */
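
	/*
	 * Load function_trace_op into r5; it is passed to the tracer as its
	 * third argument (the struct ftrace_ops pointer).
	 */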
	addis	r3, r2, function_trace_op@toc@ha
	addi	r3, r3, function_trace_op@toc@l
	ld	r5, 0(r3)

#ifdef CONFIG_LIVEPATCH
	mr	r14, r7		/* remember old NIP */
#endif
	/* Calculate ip from nip-4 into r3 for call below */
	subi	r3, r7, MCOUNT_INSN_SIZE

	/* Put the original return address in r4 as parent_ip */
	mr	r4, r0

	/* Save special regs */
	std	r8, _MSR(r1)
	std	r9, _CTR(r1)
	std	r10, _XER(r1)
	std	r11, _CCR(r1)

	/* Load &pt_regs in r6 for call below */
	addi	r6, r1, STACK_FRAME_OVERHEAD

	/* ftrace_call(r3, r4, r5, r6) */
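	/*
	 * The branch below starts out as a call to ftrace_stub and is patched
	 * at runtime (see ftrace_update_ftrace_func()) to call the currently
	 * registered trace function.
	 */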
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop

	/* Load the possibly modified NIP */
	ld	r15, _NIP(r1)

#ifdef CONFIG_LIVEPATCH
	cmpd	r14, r15	/* has NIP been altered? */
#endif

#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
	/* NIP has not been altered, skip over further checks */
	beq	1f

	/* Check if there is an active jprobe on us */
	subi	r3, r14, 4
	bl	__is_active_jprobe
	nop
	/*
	 * If r3 == 1, then this is a kprobe/jprobe.
	 * Else, this is a livepatched function.
	 *
	 * The conditional branch to livepatch_handler below will use the
	 * result of this comparison. For a kprobe/jprobe we just need to
	 * branch to the new NIP, not call livepatch_handler. The branch below
	 * is bne, so we want CR0[EQ] to be true if this is a kprobe/jprobe,
	 * which means we want CR0[EQ] = (r3 == 1).
	 */
	cmpdi	r3, 1
1:
#endif

	/* Load CTR with the possibly modified NIP */
	mtctr	r15

	/* Restore gprs */
	REST_GPR(0, r1)
	REST_10GPRS(2, r1)
	REST_10GPRS(12, r1)
	REST_10GPRS(22, r1)

	/* Restore possibly modified LR */
	ld	r0, _LINK(r1)
	mtlr	r0

	/* Restore callee's TOC */
	ld	r2, 24(r1)

	/* Pop our stack frame */
	addi	r1, r1, SWITCH_FRAME_SIZE

#ifdef CONFIG_LIVEPATCH
	/*
	 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
	 * not on a kprobe/jprobe, then handle livepatch.
	 */
	bne-	livepatch_handler
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
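	/*
	 * Like ftrace_call above, this branch is rewritten at runtime: when the
	 * function graph tracer is enabled it is redirected to
	 * ftrace_graph_caller, otherwise it falls through to ftrace_graph_stub.
	 */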
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif

	bctr			/* jump after _mcount site */

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_LIVEPATCH
	/*
	 * This function runs in the mcount context, between two functions. As
	 * such it can only clobber registers which are volatile and used in
	 * function linkage.
	 *
	 * We get here when a function A calls another function B, but B has
	 * been live patched with a new function C.
	 *
	 * On entry:
	 *  - we have no stack frame and can not allocate one
	 *  - LR points back to the original caller (in A)
	 *  - CTR holds the new NIP in C
	 *  - r0, r11 & r12 are free
	 */
livepatch_handler:
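	/*
	 * r12 = current thread_info; the per-thread livepatch stack pointer
	 * (TI_livepatch_sp) lives there.
	 */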
	CURRENT_THREAD_INFO(r12, r1)

	/* Allocate 3 x 8 bytes */
	ld	r11, TI_livepatch_sp(r12)
	addi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)

	/* Save toc & real LR on livepatch stack */
	std	r2, -24(r11)
	mflr	r12
	std	r12, -16(r11)

	/* Store stack end marker */
	lis	r12, STACK_END_MAGIC@h
	ori	r12, r12, STACK_END_MAGIC@l
	std	r12, -8(r11)

	/* Put ctr in r12 for global entry and branch there */
	mfctr	r12
	bctrl

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r11, r12 and we can use r2 until we
	 * restore it.
	 */
	CURRENT_THREAD_INFO(r12, r1)
	ld	r11, TI_livepatch_sp(r12)

	/* Check stack marker hasn't been trashed */
	lis	r2, STACK_END_MAGIC@h
	ori	r2, r2, STACK_END_MAGIC@l
	ld	r12, -8(r11)
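	/* Trap here (and emit a BUG entry) if the end marker was overwritten */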
1:	tdne	r12, r2
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld	r12, -16(r11)
	mtlr	r12
	ld	r2, -24(r11)

	/* Pop livepatch stack frame */
	CURRENT_THREAD_INFO(r12, r1)
	subi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)

	/* Return to original caller of live patched function */
	blr
#endif /* CONFIG_LIVEPATCH */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
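	/*
	 * Create a minimal stack frame: the slots below hold the volatile
	 * parameter registers (r3-r10), the saved NIP (from CTR) and the TOC.
	 */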
	stdu	r1, -112(r1)
	/* with -mprofile-kernel, parameter regs are still alive at _mcount */
	std	r10, 104(r1)
	std	r9, 96(r1)
	std	r8, 88(r1)
	std	r7, 80(r1)
	std	r6, 72(r1)
	std	r5, 64(r1)
	std	r4, 56(r1)
	std	r3, 48(r1)

	/* Save callee's TOC in the ABI compliant location */
	std	r2, 24(r1)
	ld	r2, PACATOC(r13)	/* get kernel TOC in r2 */

	mfctr	r4		/* ftrace_caller has moved local addr here */
	std	r4, 40(r1)
	mflr	r3		/* ftrace_caller has restored LR from stack */
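	/* Step back over the profiling call so r4 is the call site address */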
	subi	r4, r4, MCOUNT_INSN_SIZE

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR to this.
	 */
	mtlr	r3
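
	/* Reload the traced function's NIP into CTR for the final bctr below */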
	ld	r0, 40(r1)
	mtctr	r0
	ld	r10, 104(r1)
	ld	r9, 96(r1)
	ld	r8, 88(r1)
	ld	r7, 80(r1)
	ld	r6, 72(r1)
	ld	r5, 64(r1)
	ld	r4, 56(r1)
	ld	r3, 48(r1)

	/* Restore callee's TOC */
	ld	r2, 24(r1)

	addi	r1, r1, 112
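	/* Update the LR save slot in the caller's frame with the diverted LR */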
	mflr	r0
	std	r0, LRSAVE(r1)
	bctr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */