ftrace.c

/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *              Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include "entry.h"

void mcount_replace_code(void);
void ftrace_disable_code(void);
void ftrace_enable_insn(void);

/*
 * The mcount code looks like this:
 *      stg     %r14,8(%r15)            # offset 0
 *      larl    %r1,<&counter>          # offset 6
 *      brasl   %r14,_mcount            # offset 12
 *      lg      %r14,8(%r15)            # offset 18
 * Total length is 24 bytes. The complete mcount block initially gets replaced
 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
 * only patch the jg/lg instruction within the block.
 * Note: we do not patch the first instruction to an unconditional branch,
 * since that would break kprobes/jprobes. It is easier to leave the larl
 * instruction in and only modify the second instruction.
 * The enabled ftrace code block looks like this:
 *      larl    %r0,.+24                # offset 0
 * >    lg      %r1,__LC_FTRACE_FUNC    # offset 6
 *      br      %r1                     # offset 12
 *      brcl    0,0                     # offset 14
 *      brc     0,0                     # offset 20
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 * The disabled ftrace code block looks like this:
 *      larl    %r0,.+24                # offset 0
 * >    jg      .+18                    # offset 6
 *      br      %r1                     # offset 12
 *      brcl    0,0                     # offset 14
 *      brc     0,0                     # offset 20
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
        "       .align  4\n"
        "mcount_replace_code:\n"
        "       larl    %r0,0f\n"
        "ftrace_disable_code:\n"
        "       jg      0f\n"
        "       br      %r1\n"
        "       brcl    0,0\n"
        "       brc     0,0\n"
        "0:\n"
        "       .align  4\n"
        "ftrace_enable_insn:\n"
        "       lg      %r1,"__stringify(__LC_FTRACE_FUNC)"\n");

#define MCOUNT_BLOCK_SIZE       24      /* total length of the mcount block */
#define MCOUNT_INSN_OFFSET      6       /* offset of the patched instruction within the block */
#define FTRACE_INSN_SIZE        6       /* size of the patched lg/jg instruction */

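/*
 * The enabled code block never encodes its call target directly; it always
 * branches via __LC_FTRACE_FUNC (see above). Hence there is nothing to
 * modify when the destination of a call site changes.
 */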
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        /* Initial replacement of the whole mcount block */
        if (addr == MCOUNT_ADDR) {
                if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
                                       mcount_replace_code,
                                       MCOUNT_BLOCK_SIZE))
                        return -EPERM;
                return 0;
        }
        /* Otherwise patch only the lg instruction back to the disabling jg */
        if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
                               MCOUNT_INSN_SIZE))
                return -EPERM;
        return 0;
}

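/*
 * Re-enable tracing for this call site by patching the enabling lg
 * instruction back over the disabling jg.
 */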
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
                               FTRACE_INSN_SIZE))
                return -EPERM;
        return 0;
}

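/*
 * The call sites do not branch to the ftrace function directly but through
 * the address stored at __LC_FTRACE_FUNC, so no instructions have to be
 * rewritten when the ftrace function is updated.
 */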
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        return 0;
}

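/* No architecture specific initialization is needed. */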
int __init ftrace_dyn_arch_init(void)
{
        return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
                                              unsigned long ip)
{
        struct ftrace_graph_ent trace;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
        trace.func = ip;
        trace.depth = current->curr_ret_stack + 1;
        /* Only trace if the calling function expects to. */
        if (!ftrace_graph_entry(&trace))
                goto out;
        if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
        parent = (unsigned long) return_to_handler;
out:
        return parent;
}

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 * The mask field is the high-order nibble of the instruction's second byte,
 * which is why the writes below target ftrace_graph_caller + 1.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
        u8 op = 0x04; /* set mask field to zero */

        return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}

int ftrace_disable_ftrace_graph_caller(void)
{
        u8 op = 0xf4; /* set mask field to all ones */

        return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */