@@ -17,6 +17,87 @@
 #include <asm/ftrace.h>
 #include <asm/insn.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Replace a single instruction, which may be a branch or NOP.
+ * If @validate == true, the replaced instruction is checked against 'old'.
+ */
+static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
+			      bool validate)
+{
+	u32 replaced;
+
+	/*
+	 * Note:
+	 * Due to modules and __init, code can disappear and change; we need
+	 * to protect against faulting as well as code changing. We do this
+	 * with the aarch64_insn_*() helpers, which use probe_kernel_*().
+	 *
+	 * No lock is held here because all the modifications are run
+	 * through stop_machine().
+	 */
+	if (validate) {
+		if (aarch64_insn_read((void *)pc, &replaced))
+			return -EFAULT;
+
+		if (replaced != old)
+			return -EINVAL;
+	}
+	if (aarch64_insn_patch_text_nosync((void *)pc, new))
+		return -EPERM;
+
+	return 0;
+}
+
+/*
+ * Replace the tracer function in ftrace_caller()
+ */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long pc;
+	u32 new;
+
+	pc = (unsigned long)&ftrace_call;
+	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+
+	return ftrace_modify_code(pc, 0, new, false);
+}
+
+/*
+ * Turn on the call to ftrace_caller() in the instrumented function
+ */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long pc = rec->ip;
+	u32 old, new;
+
+	old = aarch64_insn_gen_nop();
+	new = aarch64_insn_gen_branch_imm(pc, addr, true);
+
+	return ftrace_modify_code(pc, old, new, true);
+}
+
+/*
+ * Turn off the call to ftrace_caller() in the instrumented function
+ */
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	unsigned long pc = rec->ip;
+	u32 old, new;
+
+	old = aarch64_insn_gen_branch_imm(pc, addr, true);
+	new = aarch64_insn_gen_nop();
+
+	return ftrace_modify_code(pc, old, new, true);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+	return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * function_graph tracer expects ftrace_return_to_handler() to be called
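
Everything in the hunk above patches exactly one A64 instruction at a time: aarch64_insn_gen_nop() yields the fixed NOP encoding, and aarch64_insn_gen_branch_imm() (declared in <asm/insn.h>, included above) emits a B or BL whose offset is a signed 26-bit immediate counted in 4-byte words, reaching +/-128 MiB around the patch site. As a reference point, here is a minimal user-space sketch of that encoding; gen_branch_imm() is a hypothetical stand-in for illustration, not the kernel helper:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for aarch64_insn_gen_branch_imm(): encode an A64
 * B (link == 0) or BL (link == 1) from pc to target. The offset is a
 * signed 26-bit immediate counted in 4-byte words, so a branch reaches
 * +/-128 MiB. Returns 0 for an unencodable offset.
 */
static uint32_t gen_branch_imm(uint64_t pc, uint64_t target, int link)
{
	int64_t offset = (int64_t)(target - pc);

	if ((offset & 3) || offset < -(1LL << 27) || offset >= (1LL << 27))
		return 0;

	return (uint32_t)((link ? 0x94000000u : 0x14000000u) |
			  (((uint64_t)offset >> 2) & 0x03ffffffu));
}

int main(void)
{
	printf("nop: 0x%08x\n", 0xd503201fu);			/* A64 NOP */
	printf("bl : 0x%08x\n", gen_branch_imm(0x1000, 0x2000, 1));
	printf("b  : 0x%08x\n", gen_branch_imm(0x2000, 0x1000, 0));
	return 0;
}

Because every patch site is a single aligned u32, ftrace_modify_code() can read, compare, and rewrite it as one unit.
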
@@ -61,4 +142,35 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 		return;
 	}
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
+ * depending on @enable.
+ */
+static int ftrace_modify_graph_caller(bool enable)
+{
+	unsigned long pc = (unsigned long)&ftrace_graph_call;
+	u32 branch, nop;
+
+	branch = aarch64_insn_gen_branch_imm(pc,
+			(unsigned long)ftrace_graph_caller, false);
+	nop = aarch64_insn_gen_nop();
+
+	if (enable)
+		return ftrace_modify_code(pc, nop, branch, true);
+	else
+		return ftrace_modify_code(pc, branch, nop, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
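
With both hunks in place, every arm64 patch site funnels through the same validate-then-patch contract: ftrace_make_call() and ftrace_make_nop() flip a function's entry instruction between NOP and BL, ftrace_modify_graph_caller() toggles the branch to ftrace_graph_caller(), and only ftrace_update_ftrace_func() passes validate == false, since the old instruction at ftrace_call may be a branch to any previously installed tracer and so has no single expected value. A toy user-space mock of that contract (illustrative names and values, not kernel code):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* One toy patch site; 0xd503201f is the A64 NOP it starts out as. */
static uint32_t site = 0xd503201f;

/*
 * Mock of ftrace_modify_code()'s contract: when validating, refuse to
 * patch unless the site still holds exactly the instruction the caller
 * expects, so unexpected code at the site surfaces as -EINVAL instead
 * of being silently overwritten.
 */
static int modify_code(uint32_t *pc, uint32_t old, uint32_t new, int validate)
{
	if (validate && *pc != old)
		return -EINVAL;
	*pc = new;
	return 0;
}

int main(void)
{
	uint32_t nop = 0xd503201f, bl = 0x94000400;	/* sample encodings */

	/* "ftrace_make_call": NOP -> BL, succeeds... */
	printf("%d\n", modify_code(&site, nop, bl, 1));
	/* ...repeating it fails validation, as the NOP is gone. */
	printf("%d\n", modify_code(&site, nop, bl, 1));
	return 0;
}

Running the mock prints 0 and then -22 (-EINVAL): the second attempt is rejected because the NOP it expected is no longer at the site.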