
ftrace: Pass probe ops to probe function

In preparation for cleaning up the probe function registration code, the
"data" parameter will eventually be removed from the probe->func() call.
Instead, the probe will receive its own "ops" pointer, through which it can
set up and map whatever data it needs.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
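
(Illustration only, not part of this commit.) With the updated prototype a
probe callback can recover its own per-probe state from the ops pointer it was
registered with, for example by embedding the ops in a private structure; the
names my_probe_data and my_probe_func below are hypothetical:

	struct my_probe_data {
		struct ftrace_probe_ops	ops;	/* registered ops, passed back to func() */
		unsigned long		hits;	/* example per-probe state */
	};

	static void
	my_probe_func(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_probe_ops *ops, void **data)
	{
		/* container_of() recovers the wrapper around the registered ops */
		struct my_probe_data *p = container_of(ops, struct my_probe_data, ops);

		p->hits++;
	}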
commit bca6c8d048

+ 1 - 1
kernel/trace/ftrace.c

@@ -3739,7 +3739,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	preempt_disable_notrace();
 	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
 		if (entry->ip == ip)
-			entry->ops->func(ip, parent_ip, &entry->data);
+			entry->ops->func(ip, parent_ip, entry->ops, &entry->data);
 	}
 	preempt_enable_notrace();
 }

+ 4 - 2
kernel/trace/trace.c

@@ -6735,13 +6735,15 @@ static const struct file_operations tracing_dyn_info_fops = {
 
 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
 static void
-ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
+		struct ftrace_probe_ops *ops, void **data)
 {
 	tracing_snapshot();
 }
 
 static void
-ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
+		      struct ftrace_probe_ops *ops, void **data)
 {
 	unsigned long *count = (long *)data;
 

+ 1 - 0
kernel/trace/trace.h

@@ -934,6 +934,7 @@ static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) {
 struct ftrace_probe_ops {
 	void			(*func)(unsigned long ip,
 					unsigned long parent_ip,
+					struct ftrace_probe_ops *ops,
 					void **data);
 	int			(*init)(struct ftrace_probe_ops *ops,
 					unsigned long ip, void **data);

+ 5 - 3
kernel/trace/trace_events.c

@@ -2461,7 +2461,8 @@ struct event_probe_data {
 };
 
 static void
-event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
+event_enable_probe(unsigned long ip, unsigned long parent_ip,
+		   struct ftrace_probe_ops *ops, void **_data)
 {
 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
 	struct event_probe_data *data = *pdata;
@@ -2476,7 +2477,8 @@ event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
 }
 
 static void
-event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
+event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
+			 struct ftrace_probe_ops *ops, void **_data)
 {
 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
 	struct event_probe_data *data = *pdata;
@@ -2494,7 +2496,7 @@ event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data
 	if (data->count != -1)
 		(data->count)--;
 
-	event_enable_probe(ip, parent_ip, _data);
+	event_enable_probe(ip, parent_ip, ops, _data);
 }
 
 static int

+ 16 - 8
kernel/trace/trace_functions.c

@@ -326,19 +326,22 @@ static void update_traceon_count(void **data, bool on)
 }
 
 static void
-ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
+		     struct ftrace_probe_ops *ops, void **data)
 {
 	update_traceon_count(data, 1);
 }
 
 static void
-ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
+		      struct ftrace_probe_ops *ops, void **data)
 {
 	update_traceon_count(data, 0);
 }
 
 static void
-ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceon(unsigned long ip, unsigned long parent_ip,
+	       struct ftrace_probe_ops *ops, void **data)
 {
 	if (tracing_is_on())
 		return;
@@ -347,7 +350,8 @@ ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
 }
 
 static void
-ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
+		struct ftrace_probe_ops *ops, void **data)
 {
 	if (!tracing_is_on())
 		return;
@@ -365,13 +369,15 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
 #define STACK_SKIP 4
 
 static void
-ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
+		  struct ftrace_probe_ops *ops, void **data)
 {
 	trace_dump_stack(STACK_SKIP);
 }
 
 static void
-ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_probe_ops *ops, void **data)
 {
 	long *count = (long *)data;
 	long old_count;
@@ -419,7 +425,8 @@ static int update_count(void **data)
 }
 
 static void
-ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
+	struct ftrace_probe_ops *ops, void **data)
 {
 	if (update_count(data))
 		ftrace_dump(DUMP_ALL);
@@ -427,7 +434,8 @@ ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
 
 /* Only dump the current CPU buffer. */
 static void
-ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
+	struct ftrace_probe_ops *ops, void **data)
 {
 	if (update_count(data))
 		ftrace_dump(DUMP_ORIG);