@@ -1,7 +1,7 @@
/*
* Dynamic function tracer architecture backend.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009,2014
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -17,6 +17,7 @@
#include <asm/asm-offsets.h>
#include "entry.h"
 
+void mcount_replace_code(void);
void ftrace_disable_code(void);
void ftrace_enable_insn(void);
 
@@ -24,38 +25,50 @@ void ftrace_enable_insn(void);
/*
* The 64-bit mcount code looks like this:
* stg %r14,8(%r15) # offset 0
- * > larl %r1,<&counter> # offset 6
- * > brasl %r14,_mcount # offset 12
+ * larl %r1,<&counter> # offset 6
+ * brasl %r14,_mcount # offset 12
* lg %r14,8(%r15) # offset 18
- * Total length is 24 bytes. The middle two instructions of the mcount
- * block get overwritten by ftrace_make_nop / ftrace_make_call.
+ * Total length is 24 bytes. The complete mcount block initially gets replaced
+ * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
+ * only patch the jg/lg instruction within the block.
+ * Note: we do not patch the first instruction to an unconditional branch,
+ * since that would break kprobes/jprobes. It is easier to leave the larl
+ * instruction in and only modify the second instruction.
* The 64-bit enabled ftrace code block looks like this:
- * stg %r14,8(%r15) # offset 0
+ * larl %r0,.+24 # offset 0
* > lg %r1,__LC_FTRACE_FUNC # offset 6
- * > lgr %r0,%r0 # offset 12
- * > basr %r14,%r1 # offset 16
- * lg %r14,8(%15) # offset 18
- * The return points of the mcount/ftrace function have the same offset 18.
- * The 64-bit disable ftrace code block looks like this:
- * stg %r14,8(%r15) # offset 0
+ * br %r1 # offset 12
+ * brcl 0,0 # offset 14
+ * brc 0,0 # offset 20
+ * The ftrace function gets called with a non-standard C function call ABI
+ * where r0 contains the return address. It is also expected that the called
+ * function only clobbers r0 and r1, but restores r2-r15.
+ * The return point of the ftrace function has offset 24, so execution
+ * continues behind the mcount block.
+ * larl %r0,.+24 # offset 0
* > jg .+18 # offset 6
- * > lgr %r0,%r0 # offset 12
- * > basr %r14,%r1 # offset 16
- * lg %r14,8(%15) # offset 18
+ * br %r1 # offset 12
+ * brcl 0,0 # offset 14
+ * brc 0,0 # offset 20
* The jg instruction branches to offset 24 to skip as many instructions
* as possible.
*/
asm(
" .align 4\n"
+ "mcount_replace_code:\n"
+ " larl %r0,0f\n"
"ftrace_disable_code:\n"
" jg 0f\n"
- " lgr %r0,%r0\n"
- " basr %r14,%r1\n"
+ " br %r1\n"
+ " brcl 0,0\n"
+ " brc 0,0\n"
"0:\n"
" .align 4\n"
"ftrace_enable_insn:\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
 
+#define MCOUNT_BLOCK_SIZE 24
+#define MCOUNT_INSN_OFFSET 6
#define FTRACE_INSN_SIZE 6
 
#else /* CONFIG_64BIT */
@@ -116,6 +129,16 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
+#ifdef CONFIG_64BIT
+ /* Initial replacement of the whole mcount block */
+ if (addr == MCOUNT_ADDR) {
+ if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
+ mcount_replace_code,
+ MCOUNT_BLOCK_SIZE))
+ return -EPERM;
+ return 0;
+ }
+#endif
if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
MCOUNT_INSN_SIZE))
return -EPERM;
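
Note (not part of the patch): the comment block above says that after the initial full-block replacement, ftrace_make_call / ftrace_make_nop only patch the jg/lg instruction at offset 6. For context, a minimal sketch of the enable path as I understand it, mirroring the existing ftrace_make_call in this file; the function body is reproduced from memory of the mainline source of that era, not taken from this diff, so treat it as an assumption.

/*
 * Illustrative sketch only: enabling tracing overwrites just the 6-byte
 * instruction at rec->ip (offset 6 inside the 24-byte block) with the
 * "lg %r1,__LC_FTRACE_FUNC" from ftrace_enable_insn, i.e. FTRACE_INSN_SIZE
 * bytes. Assumed to match the unchanged ftrace_make_call in ftrace.c.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}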