@@ -2828,13 +2828,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 	if (!command || !ftrace_enabled) {
 		/*
-		 * If these are per_cpu ops, they still need their
-		 * per_cpu field freed. Since, function tracing is
+		 * If these are dynamic or per_cpu ops, they still
+		 * need their data freed. Since, function tracing is
 		 * not currently active, we can just free them
 		 * without synchronizing all CPUs.
 		 */
-		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
-			per_cpu_ops_free(ops);
+		if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
+			goto free_ops;
+
 		return 0;
 	}
 
@@ -2900,6 +2901,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	if (IS_ENABLED(CONFIG_PREEMPT))
 		synchronize_rcu_tasks();
 
+ free_ops:
 	arch_ftrace_trampoline_free(ops);
 
 	if (ops->flags & FTRACE_OPS_FL_PER_CPU)
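For context, here is a minimal user-space sketch of the control-flow change these hunks make. It is not kernel code: fake_ops, FL_DYNAMIC, trampoline_free(), and shutdown() are illustrative stand-ins for struct ftrace_ops, FTRACE_OPS_FL_DYNAMIC, arch_ftrace_trampoline_free(), and ftrace_shutdown(). It shows why the early return taken when tracing is not active leaked data owned by dynamic ops, and how jumping to the shared free_ops: label reaches the cleanup without the CPU synchronization the full path performs.

/*
 * Minimal standalone sketch (not kernel code) of the patched control flow.
 * All names are hypothetical stand-ins for the real ftrace structures.
 */
#include <stdio.h>
#include <stdlib.h>

#define FL_DYNAMIC	(1 << 0)
#define FL_PER_CPU	(1 << 1)

struct fake_ops {
	unsigned int flags;
	void *trampoline;	/* stands in for the arch trampoline */
};

static void trampoline_free(struct fake_ops *ops)
{
	free(ops->trampoline);
	ops->trampoline = NULL;
}

/* command == 0 models the "tracing not currently active" early path. */
static int shutdown(struct fake_ops *ops, int command)
{
	if (!command) {
		/*
		 * Early path: tracing is off, so no CPU synchronization
		 * is needed, but dynamic/per-CPU ops still own memory.
		 * Before the patch this returned immediately and leaked.
		 */
		if (ops->flags & (FL_DYNAMIC | FL_PER_CPU))
			goto free_ops;

		return 0;
	}

	/* ... the full path would synchronize CPUs here ... */

 free_ops:
	trampoline_free(ops);
	return 0;
}

int main(void)
{
	struct fake_ops ops = {
		.flags = FL_DYNAMIC,
		.trampoline = malloc(64),
	};

	shutdown(&ops, 0);
	printf("trampoline after shutdown: %p\n", ops.trampoline);
	return 0;
}

Built with a leak detector (for example, gcc -fsanitize=address), the pre-patch variant of shutdown() (an early return in place of the goto) reports the trampoline allocation as leaked, while the version above frees it on both paths.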