@@ -13,33 +13,83 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <linux/fs.h>
 
 #include "trace.h"
 
-/* function tracing enabled */
-static int ftrace_function_enabled;
+static void tracing_start_function_trace(struct trace_array *tr);
+static void tracing_stop_function_trace(struct trace_array *tr);
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+		    struct ftrace_ops *op, struct pt_regs *pt_regs);
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *op, struct pt_regs *pt_regs);
+static struct ftrace_ops trace_ops;
+static struct ftrace_ops trace_stack_ops;
+static struct tracer_flags func_flags;
+
+/* Our option */
+enum {
+	TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static int allocate_ftrace_ops(struct trace_array *tr)
+{
+	struct ftrace_ops *ops;
 
-static struct trace_array *func_trace;
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		return -ENOMEM;
 
-static void tracing_start_function_trace(void);
-static void tracing_stop_function_trace(void);
+	/* Currently only the non-stack version is supported */
+	ops->func = function_trace_call;
+	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
+
+	tr->ops = ops;
+	ops->private = tr;
+	return 0;
+}
 
 static int function_trace_init(struct trace_array *tr)
 {
-	func_trace = tr;
+	struct ftrace_ops *ops;
+	int ret;
+
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		/* There's only one global tr */
+		if (!trace_ops.private) {
+			trace_ops.private = tr;
+			trace_stack_ops.private = tr;
+		}
+
+		if (func_flags.val & TRACE_FUNC_OPT_STACK)
+			ops = &trace_stack_ops;
+		else
+			ops = &trace_ops;
+		tr->ops = ops;
+	} else {
+		ret = allocate_ftrace_ops(tr);
+		if (ret)
+			return ret;
+	}
+
 	tr->trace_buffer.cpu = get_cpu();
 	put_cpu();
 
 	tracing_start_cmdline_record();
-	tracing_start_function_trace();
+	tracing_start_function_trace(tr);
 	return 0;
 }
 
 static void function_trace_reset(struct trace_array *tr)
 {
-	tracing_stop_function_trace();
+	tracing_stop_function_trace(tr);
 	tracing_stop_cmdline_record();
+	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
+		kfree(tr->ops);
+	tr->ops = NULL;
 }
 
 static void function_trace_start(struct trace_array *tr)
@@ -47,25 +97,18 @@ static void function_trace_start(struct trace_array *tr)
 	tracing_reset_online_cpus(&tr->trace_buffer);
 }
 
-/* Our option */
-enum {
-	TRACE_FUNC_OPT_STACK = 0x1,
-};
-
-static struct tracer_flags func_flags;
-
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct trace_array *tr = func_trace;
+	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	int bit;
 	int cpu;
 	int pc;
 
-	if (unlikely(!ftrace_function_enabled))
+	if (unlikely(!tr->function_enabled))
 		return;
 
 	pc = preempt_count();
@@ -91,14 +134,14 @@ static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct trace_array *tr = func_trace;
+	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 	int pc;
 
-	if (unlikely(!ftrace_function_enabled))
+	if (unlikely(!tr->function_enabled))
 		return;
 
 	/*
@@ -128,7 +171,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	local_irq_restore(flags);
 }
 
-
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
@@ -153,26 +195,17 @@ static struct tracer_flags func_flags = {
 	.opts = func_opts
 };
 
-static void tracing_start_function_trace(void)
+static void tracing_start_function_trace(struct trace_array *tr)
 {
-	ftrace_function_enabled = 0;
-
-	if (func_flags.val & TRACE_FUNC_OPT_STACK)
-		register_ftrace_function(&trace_stack_ops);
-	else
-		register_ftrace_function(&trace_ops);
-
-	ftrace_function_enabled = 1;
+	tr->function_enabled = 0;
+	register_ftrace_function(tr->ops);
+	tr->function_enabled = 1;
 }
 
-static void tracing_stop_function_trace(void)
+static void tracing_stop_function_trace(struct trace_array *tr)
 {
-	ftrace_function_enabled = 0;
-
-	if (func_flags.val & TRACE_FUNC_OPT_STACK)
-		unregister_ftrace_function(&trace_stack_ops);
-	else
-		unregister_ftrace_function(&trace_ops);
+	tr->function_enabled = 0;
+	unregister_ftrace_function(tr->ops);
 }
 
 static int
@@ -184,12 +217,14 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
 			break;
 
+		unregister_ftrace_function(tr->ops);
+
 		if (set) {
-			unregister_ftrace_function(&trace_ops);
-			register_ftrace_function(&trace_stack_ops);
+			tr->ops = &trace_stack_ops;
+			register_ftrace_function(tr->ops);
 		} else {
-			unregister_ftrace_function(&trace_stack_ops);
-			register_ftrace_function(&trace_ops);
+			tr->ops = &trace_ops;
+			register_ftrace_function(tr->ops);
 		}
 
 		break;
@@ -209,6 +244,7 @@ static struct tracer function_trace __tracer_data =
 	.wait_pipe = poll_wait_pipe,
 	.flags = &func_flags,
 	.set_flag = func_set_flag,
+	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_function,
#endif