@@ -105,11 +105,23 @@ enum {
 	FTRACE_OPS_FL_INITIALIZED	= 1 << 8,
 };
 
+/*
+ * Note, ftrace_ops can be referenced outside of RCU protection.
+ * (Although, for perf, the control ops prevent that). If ftrace_ops is
+ * allocated and not part of kernel core data, the unregistering of it will
+ * perform a scheduling on all CPUs to make sure that there are no more users.
+ * Depending on the load of the system that may take a bit of time.
+ *
+ * Any private data added must also take care not to be freed and if private
+ * data is added to a ftrace_ops that is in core code, the user of the
+ * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
+ */
 struct ftrace_ops {
 	ftrace_func_t			func;
 	struct ftrace_ops		*next;
 	unsigned long			flags;
 	int __percpu			*disabled;
+	void				*private;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash		*notrace_hash;
 	struct ftrace_hash		*filter_hash;
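
For illustration only (not part of this patch), below is a minimal sketch of how a
user of the new ->private field might follow the lifetime rule described in the
comment above. The my_ops, my_callback and my_data names are hypothetical, and
the callback signature matches ftrace_func_t as it exists in this tree. The point
is only that a statically allocated ftrace_ops must synchronize (for example via
schedule_on_each_cpu()) after unregistering and before freeing its private data.

#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/workqueue.h>	/* schedule_on_each_cpu() */

/* Hypothetical per-ops state hung off ->private. */
struct my_data {
	unsigned long hits;
};

static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	struct my_data *data = op->private;

	data->hits++;
}

/* Lives in core (static) data, so unregistering does not wait for users. */
static struct ftrace_ops my_ops = {
	.func = my_callback,
};

/* Empty work item, used only to force a schedule on every CPU. */
static void my_sync(struct work_struct *work)
{
}

static int my_setup(void)
{
	int ret;

	my_ops.private = kzalloc(sizeof(struct my_data), GFP_KERNEL);
	if (!my_ops.private)
		return -ENOMEM;

	ret = register_ftrace_function(&my_ops);
	if (ret)
		kfree(my_ops.private);
	return ret;
}

static void my_teardown(void)
{
	unregister_ftrace_function(&my_ops);
	/*
	 * my_ops is not dynamically allocated, so unregistering did not
	 * wait for stray users; make sure no CPU is still inside
	 * my_callback() before freeing the private data.
	 */
	schedule_on_each_cpu(my_sync);
	kfree(my_ops.private);
}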