@@ -498,20 +498,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		ret = remove_ftrace_list_ops(&ftrace_control_list,
 					     &control_ops, ops);
-		if (!ret) {
-			/*
-			 * The ftrace_ops is now removed from the list,
-			 * so there'll be no new users. We must ensure
-			 * all current users are done before we free
-			 * the control data.
-			 * Note synchronize_sched() is not enough, as we
-			 * use preempt_disable() to do RCU, but the function
-			 * tracer can be called where RCU is not active
-			 * (before user_exit()).
-			 */
-			schedule_on_each_cpu(ftrace_sync);
-			control_ops_free(ops);
-		}
 	} else
 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -521,17 +507,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
-	/*
-	 * Dynamic ops may be freed, we must make sure that all
-	 * callers are done before leaving this function.
-	 *
-	 * Again, normal synchronize_sched() is not good enough.
-	 * We need to do a hard force of sched synchronization.
-	 */
-	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		schedule_on_each_cpu(ftrace_sync);
-
-
 	return 0;
 }
 
@@ -2208,10 +2183,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
-	if (!command || !ftrace_enabled)
+	if (!command || !ftrace_enabled) {
+		/*
+		 * If these are control ops, they still need their
+		 * per_cpu field freed. Since, function tracing is
+		 * not currently active, we can just free them
+		 * without synchronizing all CPUs.
+		 */
+		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+			control_ops_free(ops);
 		return 0;
+	}
 
 	ftrace_run_update_code(command);
+
+	/*
+	 * Dynamic ops may be freed, we must make sure that all
+	 * callers are done before leaving this function.
+	 * The same goes for freeing the per_cpu data of the control
+	 * ops.
+	 *
+	 * Again, normal synchronize_sched() is not good enough.
+	 * We need to do a hard force of sched synchronization.
+	 * This is because we use preempt_disable() to do RCU, but
+	 * the function tracers can be called where RCU is not watching
+	 * (like before user_exit()). We can not rely on the RCU
+	 * infrastructure to do the synchronization, thus we must do it
+	 * ourselves.
+	 */
+	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+		schedule_on_each_cpu(ftrace_sync);
+
+		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+			control_ops_free(ops);
+	}
+
 	return 0;
 }
 
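For context on the synchronization trick the comments above describe: schedule_on_each_cpu() queues a work item on every CPU and waits for all of them to finish, so each CPU is forced through the scheduler in process context before it returns. Pairing it with an empty work callback therefore acts as a "hard" synchronize_sched(), covering CPUs where RCU is not watching. The sketch below shows that pairing; it assumes ftrace_sync() is the empty stub in kernel/trace/ftrace.c, and the hard_sched_sync() wrapper is only illustrative, not part of the patch.

#include <linux/workqueue.h>

/* Empty work callback: its only job is to run once on each CPU. */
static void ftrace_sync(struct work_struct *work)
{
	/*
	 * Intentionally does nothing. By the time
	 * schedule_on_each_cpu(ftrace_sync) returns, every CPU has run
	 * this stub in process context, i.e. every CPU has passed
	 * through the scheduler -- even CPUs where RCU is not watching
	 * (such as before user_exit()), which is why a plain
	 * synchronize_sched() is not sufficient here.
	 */
}

/* Illustrative wrapper (hypothetical name), mirroring ftrace_shutdown(). */
static void hard_sched_sync(void)
{
	/* Returns only after all CPUs have executed ftrace_sync(). */
	schedule_on_each_cpu(ftrace_sync);
}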