@@ -297,6 +297,12 @@ static void ftrace_sync_ipi(void *data)
 	smp_rmb();
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void update_function_graph_func(void);
+#else
+static inline void update_function_graph_func(void) { }
+#endif
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -329,6 +335,8 @@ static void update_ftrace_function(void)
 	if (ftrace_trace_function == func)
 		return;
 
+	update_function_graph_func();
+
 	/*
 	 * If we are using the list function, it doesn't care
 	 * about the function_trace_ops.
@@ -4906,6 +4914,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 trace_func_graph_ret_t ftrace_graph_return =
 			(trace_func_graph_ret_t)ftrace_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -5047,6 +5056,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
 				FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
+static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+{
+	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
+		return 0;
+	return __ftrace_graph_entry(trace);
+}
+
+/*
+ * The function graph tracer should only trace the functions defined
+ * by set_ftrace_filter and set_ftrace_notrace. If another function
+ * tracer ops is registered, the graph tracer requires testing the
+ * function against the global ops, and not just trace any function
+ * that any ftrace_ops has registered.
+ */
+static void update_function_graph_func(void)
+{
+	if (ftrace_ops_list == &ftrace_list_end ||
+	    (ftrace_ops_list == &global_ops &&
+	     global_ops.next == &ftrace_list_end))
+		ftrace_graph_entry = __ftrace_graph_entry;
+	else
+		ftrace_graph_entry = ftrace_graph_entry_test;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
@@ -5071,7 +5104,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	}
 
 	ftrace_graph_return = retfunc;
-	ftrace_graph_entry = entryfunc;
+
+	/*
+	 * Update the indirect function to the entryfunc, and the
+	 * function that gets called to the entry_test first. Then
+	 * call update_function_graph_func() to determine if the
+	 * entryfunc should be called directly or not.
+	 */
+	__ftrace_graph_entry = entryfunc;
+	ftrace_graph_entry = ftrace_graph_entry_test;
+	update_function_graph_func();
 
 	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
@@ -5090,6 +5132,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_active--;
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
+	__ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
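
The patch boils down to a guarded indirect call: the hot path always jumps through
ftrace_graph_entry, which either points straight at the registered callback (when
global_ops is the only ops on the list) or at ftrace_graph_entry_test, which filters
against global_ops before forwarding to __ftrace_graph_entry. Below is a minimal
standalone userspace sketch of the same dispatch scheme; every name in it
(graph_entry, graph_entry_test, update_graph_func, the toy prefix filter) is a
hypothetical stand-in for the kernel symbols, not kernel API.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel types in the patch above. */
struct graph_ent { const char *func; };
typedef int (*graph_ent_t)(struct graph_ent *);

static int entry_stub(struct graph_ent *e) { return 0; }

/* Pointer the hot path calls (plays the role of ftrace_graph_entry). */
static graph_ent_t graph_entry = entry_stub;
/* Indirect pointer holding the real callback (plays __ftrace_graph_entry). */
static graph_ent_t __graph_entry = entry_stub;

/* Toy filter standing in for ftrace_ops_test(&global_ops, ...). */
static int global_ops_test(const char *func)
{
	return func[0] != '_';	/* skip leading-underscore names */
}

/* Gatekeeper (plays ftrace_graph_entry_test): filter, then forward. */
static int graph_entry_test(struct graph_ent *e)
{
	if (!global_ops_test(e->func))
		return 0;
	return __graph_entry(e);
}

/* Plays update_function_graph_func(): choose direct vs. tested dispatch. */
static void update_graph_func(int other_ops_registered)
{
	graph_entry = other_ops_registered ? graph_entry_test : __graph_entry;
}

static int my_entry(struct graph_ent *e)
{
	printf("enter %s\n", e->func);
	return 1;
}

int main(void)
{
	struct graph_ent a = { "do_work" }, b = { "_hidden" };

	__graph_entry = my_entry;	/* register_ftrace_graph() analogue */

	update_graph_func(0);		/* only global_ops: direct call */
	graph_entry(&a);		/* prints "enter do_work" */
	graph_entry(&b);		/* prints "enter _hidden" (no test) */

	update_graph_func(1);		/* another ops registered: test first */
	graph_entry(&a);		/* prints "enter do_work" */
	graph_entry(&b);		/* filtered out: no output */
	return 0;
}

Keeping the real callback in a second pointer is what lets unregister_ftrace_graph()
simply reset both pointers to the stub, and it means the common case, the graph
tracer running alone, pays no per-function-entry filter test.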