@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -278,6 +280,23 @@ static void update_global_ops(void)
 	global_ops.func = func;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
+static void ftrace_sync_ipi(void *data)
+{
+	/* Probably not needed, but do it anyway */
+	smp_rmb();
+}
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -296,16 +315,59 @@ static void update_ftrace_function(void)
 	     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
 		if (ftrace_ops_list == &global_ops)
-			function_trace_op = ftrace_global_list;
+			set_function_trace_op = ftrace_global_list;
 		else
-			function_trace_op = ftrace_ops_list;
+			set_function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
 	} else {
 		/* Just use the default ftrace_ops */
-		function_trace_op = &ftrace_list_end;
+		set_function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
 	}
 
+	/* If there's no change, then do nothing more here */
+	if (ftrace_trace_function == func)
+		return;
+
+	/*
+	 * If we are using the list function, it doesn't care
+	 * about the function_trace_ops.
+	 */
+	if (func == ftrace_ops_list_func) {
+		ftrace_trace_function = func;
+		/*
+		 * Don't even bother setting function_trace_ops,
+		 * it would be racy to do so anyway.
+		 */
+		return;
+	}
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+	/*
+	 * For static tracing, we need to be a bit more careful.
+	 * The function change takes effect immediately. Thus,
+	 * we need to coordinate the setting of the function_trace_ops
+	 * with the setting of the ftrace_trace_function.
+	 *
+	 * Set the function to the list ops, which will call the
+	 * function we want, albeit indirectly, but it handles the
+	 * ftrace_ops and doesn't depend on function_trace_op.
+	 */
+	ftrace_trace_function = ftrace_ops_list_func;
+	/*
+	 * Make sure all CPUs see this. Yes this is slow, but static
+	 * tracing is slow and nasty to have enabled.
+	 */
+	schedule_on_each_cpu(ftrace_sync);
+	/* Now all cpus are using the list ops. */
+	function_trace_op = set_function_trace_op;
+	/* Make sure the function_trace_op is visible on all CPUs */
+	smp_wmb();
+	/* Nasty way to force a rmb on all cpus */
+	smp_call_function(ftrace_sync_ipi, NULL, 1);
+	/* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
 	ftrace_trace_function = func;
 }
 
@@ -410,17 +472,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-	/*
-	 * This function is just a stub to implement a hard force
-	 * of synchronize_sched(). This requires synchronizing
-	 * tasks even in userspace and idle.
-	 *
-	 * Yes, function tracing is rude.
-	 */
-}
-
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
@@ -1979,8 +2030,14 @@ void ftrace_modify_all_code(int command)
 	else if (command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
 
-	if (update && ftrace_trace_function != ftrace_ops_list_func)
+	if (update && ftrace_trace_function != ftrace_ops_list_func) {
+		function_trace_op = set_function_trace_op;
+		smp_wmb();
+		/* If irqs are disabled, we are in stop machine */
+		if (!irqs_disabled())
+			smp_call_function(ftrace_sync_ipi, NULL, 1);
 		ftrace_update_ftrace_func(ftrace_trace_function);
+	}
 
 	if (command & FTRACE_START_FUNC_RET)
 		ftrace_enable_ftrace_graph_caller();
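
The ordering the hunks above rely on is a plain publish/consume pairing: the new function_trace_op must be made visible (the smp_wmb() plus the forced rmb on every CPU) before ftrace_trace_function is switched, so that any CPU which sees the new callback also sees the ops that callback expects. Below is a minimal userspace sketch of that same pairing using C11 atomics instead of the kernel barriers; it is illustrative only, and the names (struct ops, current_ops, trace_fn, install(), callback()) are invented for the example, not taken from the patch.

/*
 * Illustrative only -- NOT kernel code.  The writer publishes the ops
 * pointer before flipping the "which callback" flag; a reader that sees
 * the new flag is then guaranteed to also see the matching ops.
 */
#include <stdatomic.h>
#include <stdio.h>

struct ops { const char *name; };                       /* stand-in for struct ftrace_ops */

static struct ops list_end = { "list_end" };
static struct ops new_ops  = { "new_ops" };

static _Atomic(struct ops *) current_ops = &list_end;   /* ~ function_trace_op       */
static atomic_int            trace_fn    = 0;           /* ~ ftrace_trace_function   */

static void install(void)                /* writer side, ~ update_ftrace_function()  */
{
	/* 1) publish the ops; the release store below orders this write */
	atomic_store_explicit(&current_ops, &new_ops, memory_order_relaxed);
	/* 2) release store ~ smp_wmb(): ops is visible before the switch */
	atomic_store_explicit(&trace_fn, 1, memory_order_release);
}

static void callback(void)               /* reader side, ~ the traced callback       */
{
	/* acquire load ~ smp_rmb(): pairs with the release store above */
	if (atomic_load_explicit(&trace_fn, memory_order_acquire)) {
		struct ops *op = atomic_load_explicit(&current_ops,
						      memory_order_relaxed);
		printf("tracing with %s\n", op->name);   /* never sees stale ops */
	}
}

int main(void)
{
	install();
	callback();
	return 0;
}

The difference in the patch is where the read barrier is paid: rather than an acquire on every traced call, the smp_call_function(ftrace_sync_ipi, ...) forces the rmb once on every CPU up front, keeping the tracing fast path free of barriers.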