|
@@ -98,6 +98,13 @@ struct ftrace_pid {
 	struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+	return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 	if (!test_tsk_trace_trace(current))
 		return;
 
-	ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-	/* do not set ftrace_pid_function to itself! */
-	if (func != ftrace_pid_func)
-		ftrace_pid_function = func;
+	op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
-	ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	} else
 		add_ftrace_ops(&ftrace_ops_list, ops);
 
+	/* Always save the function, and reset at unregistering */
+	ops->saved_func = ops->func;
+
+	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+		ops->func = ftrace_pid_func;
+
 	ftrace_update_trampoline(ops);
 
 	if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
+	ops->func = ops->saved_func;
+
 	return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+	bool enabled = ftrace_pids_enabled();
+	struct ftrace_ops *op;
+
 	/* Only do something if we are tracing something */
 	if (ftrace_trace_function == ftrace_stub)
 		return;
 
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op->flags & FTRACE_OPS_FL_PID) {
+			op->func = enabled ? ftrace_pid_func :
+				op->saved_func;
+			ftrace_update_trampoline(op);
+		}
+	} while_for_each_ftrace_op(op);
+
 	update_ftrace_function();
 }
 
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
 	.local_hash.filter_hash		= EMPTY_HASH,
 	INIT_OPS_HASH(global_ops)
 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
-					  FTRACE_OPS_FL_INITIALIZED,
+					  FTRACE_OPS_FL_INITIALIZED |
+					  FTRACE_OPS_FL_PID,
 };
 
 /*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
+				  FTRACE_OPS_FL_INITIALIZED |
+				  FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
 		if (WARN_ON(tr->ops->func != ftrace_stub))
 			printk("ftrace ops had %pS for function\n",
 			       tr->ops->func);
-		/* Only the top level instance does pid tracing */
-		if (!list_empty(&ftrace_pids)) {
-			set_ftrace_pid_function(func);
-			func = ftrace_pid_func;
-		}
 	}
 	tr->ops->func = func;
 	tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
 	mutex_lock(&ftrace_lock);
 
-	if (list_empty(&ftrace_pids) && (!*pos))
+	if (!ftrace_pids_enabled() && (!*pos))
 		return (void *) 1;
 
 	return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
 	.func			= ftrace_stub,
 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
 				   FTRACE_OPS_FL_INITIALIZED |
+				   FTRACE_OPS_FL_PID |
 				   FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,