@@ -17,6 +17,7 @@
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -644,13 +645,8 @@ int __init ftrace_dyn_arch_init(void)
 {
 	return 0;
 }
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-extern void ftrace_graph_call(void);
 
+#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
 static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
 {
 	static union ftrace_code_union calc;
@@ -664,6 +660,193 @@ static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
 	 */
 	return calc.code;
 }
+#endif
+
+/* Currently only x86_64 supports dynamic trampolines */
+#ifdef CONFIG_X86_64
+
+#ifdef CONFIG_MODULES
+#include <linux/moduleloader.h>
+/* Module allocation simplifies allocating memory for code */
+static inline void *alloc_tramp(unsigned long size)
+{
+	return module_alloc(size);
+}
+static inline void tramp_free(void *tramp)
+{
+	module_free(NULL, tramp);
+}
+#else
+/* Trampolines can only be created if modules are supported */
+static inline void *alloc_tramp(unsigned long size)
+{
+	return NULL;
+}
+static inline void tramp_free(void *tramp) { }
+#endif
+
+/* Defined as markers to the end of the ftrace default trampolines */
+extern void ftrace_caller_end(void);
+extern void ftrace_regs_caller_end(void);
+extern void ftrace_return(void);
+extern void ftrace_caller_op_ptr(void);
+extern void ftrace_regs_caller_op_ptr(void);
+
+/* movq function_trace_op(%rip), %rdx */
+/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
+#define OP_REF_SIZE	7
+
+/*
+ * The ftrace_ops is passed to the function callback. Since the
+ * trampoline only services a single ftrace_ops, we can pass in
+ * that ops directly.
+ *
+ * The ftrace_op_code_union is used to create a pointer to the
+ * ftrace_ops that will be passed to the callback function.
+ */
+union ftrace_op_code_union {
+	char code[OP_REF_SIZE];
+	struct {
+		char op[3];
+		int offset;
+	} __attribute__((packed));
+};
+
|
|
|
|
+static unsigned long create_trampoline(struct ftrace_ops *ops)
|
|
|
|
+{
|
|
|
|
+ unsigned const char *jmp;
|
|
|
|
+ unsigned long start_offset;
|
|
|
|
+ unsigned long end_offset;
|
|
|
|
+ unsigned long op_offset;
|
|
|
|
+ unsigned long offset;
|
|
|
|
+ unsigned long size;
|
|
|
|
+ unsigned long ip;
|
|
|
|
+ unsigned long *ptr;
|
|
|
|
+ void *trampoline;
|
|
|
|
+ /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
|
|
|
|
+ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
|
|
|
|
+ union ftrace_op_code_union op_ptr;
|
|
|
|
+ int ret;
|
|
|
|
+
|
|
|
|
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
|
|
|
|
+ start_offset = (unsigned long)ftrace_regs_caller;
|
|
|
|
+ end_offset = (unsigned long)ftrace_regs_caller_end;
|
|
|
|
+ op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
|
|
|
|
+ } else {
|
|
|
|
+ start_offset = (unsigned long)ftrace_caller;
|
|
|
|
+ end_offset = (unsigned long)ftrace_caller_end;
|
|
|
|
+ op_offset = (unsigned long)ftrace_caller_op_ptr;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ size = end_offset - start_offset;
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * Allocate enough size to store the ftrace_caller code,
|
|
|
|
+ * the jmp to ftrace_return, as well as the address of
|
|
|
|
+ * the ftrace_ops this trampoline is used for.
|
|
|
|
+ */
|
|
|
|
+ trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
|
|
|
|
+ if (!trampoline)
|
|
|
|
+ return 0;
|
|
|
|
+
|
|
|
|
+ /* Copy ftrace_caller onto the trampoline memory */
|
|
|
|
+ ret = probe_kernel_read(trampoline, (void *)start_offset, size);
|
|
|
|
+ if (WARN_ON(ret < 0)) {
|
|
|
|
+ tramp_free(trampoline);
|
|
|
|
+ return 0;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ ip = (unsigned long)trampoline + size;
|
|
|
|
+
|
|
|
|
+ /* The trampoline ends with a jmp to ftrace_return */
|
|
|
|
+ jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return);
|
|
|
|
+ memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * The address of the ftrace_ops that is used for this trampoline
|
|
|
|
+ * is stored at the end of the trampoline. This will be used to
|
|
|
|
+ * load the third parameter for the callback. Basically, that
|
|
|
|
+ * location at the end of the trampoline takes the place of
|
|
|
|
+ * the global function_trace_op variable.
|
|
|
|
+ */
|
|
|
|
+
|
|
|
|
+ ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
|
|
|
|
+ *ptr = (unsigned long)ops;
|
|
|
|
+
|
|
|
|
+ op_offset -= start_offset;
|
|
|
|
+ memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
|
|
|
|
+
|
|
|
|
+ /* Are we pointing to the reference? */
|
|
|
|
+ if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
|
|
|
|
+ tramp_free(trampoline);
|
|
|
|
+ return 0;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* Load the contents of ptr into the callback parameter */
|
|
|
|
+ offset = (unsigned long)ptr;
|
|
|
|
+ offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;
|
|
|
|
+
|
|
|
|
+ op_ptr.offset = offset;
|
|
|
|
+
|
|
|
|
+ /* put in the new offset to the ftrace_ops */
|
|
|
|
+ memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
|
|
|
|
+
|
|
|
|
+	/* The ALLOC_TRAMP flag lets us know we created it */
+	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+
+	return (unsigned long)trampoline;
+}
+
+void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
+{
+	ftrace_func_t func;
+	unsigned char *new;
+	unsigned long start_offset;
+	unsigned long call_offset;
+	unsigned long offset;
+	unsigned long ip;
+	int ret;
+
+	if (ops->trampoline) {
+		/*
+		 * The ftrace_ops caller may set up its own trampoline.
+		 * In such a case, this code must not modify it.
+		 */
+		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
+			return;
+	} else {
+		ops->trampoline = create_trampoline(ops);
+		if (!ops->trampoline)
+			return;
+	}
+
+	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+		start_offset = (unsigned long)ftrace_regs_caller;
+		call_offset = (unsigned long)ftrace_regs_call;
+	} else {
+		start_offset = (unsigned long)ftrace_caller;
+		call_offset = (unsigned long)ftrace_call;
+	}
+
+	offset = call_offset - start_offset;
+	ip = ops->trampoline + offset;
+
+	func = ftrace_ops_get_func(ops);
+
+	/* Do a safe modify in case the trampoline is executing */
+	new = ftrace_call_replace(ip, (unsigned long)func);
+	ret = update_ftrace_func(ip, new);
+
+	/* The update should never fail */
+	WARN_ON(ret);
+}
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
 
 static int ftrace_mod_jmp(unsigned long ip, void *func)
 {