@@ -65,15 +65,21 @@
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

 #ifdef CONFIG_DYNAMIC_FTRACE
-#define INIT_REGEX_LOCK(opsname) \
- .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
+#define INIT_OPS_HASH(opsname) \
+ .func_hash = &opsname.local_hash, \
+ .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+#define ASSIGN_OPS_HASH(opsname, val) \
+ .func_hash = val, \
+ .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 #else
-#define INIT_REGEX_LOCK(opsname)
+#define INIT_OPS_HASH(opsname)
+#define ASSIGN_OPS_HASH(opsname, val)
 #endif

 static struct ftrace_ops ftrace_list_end __read_mostly = {
 .func = ftrace_stub,
 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+ INIT_OPS_HASH(ftrace_list_end)
 };

 /* ftrace_enabled is a method to turn ftrace on or off */
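
The macro change above is the core of the patch: instead of each ftrace_ops embedding its filter/notrace hashes and regex_lock directly, they now live in a local_hash structure, and a func_hash pointer selects which hash the ops actually uses. INIT_OPS_HASH() points an ops at its own local_hash, while ASSIGN_OPS_HASH() lets it borrow another ops' hash. A minimal user-space model (stand-in types and illustrative names, not the kernel's definitions) shows what the designated initializers expand to:

/* A toy model of the new layout; the real definitions live in the
 * kernel headers and differ in detail. */
#include <stdio.h>

struct mutex { int locked; };			/* stand-in for the kernel type */
#define __MUTEX_INITIALIZER(mutexname) { 0 }	/* stand-in initializer */

struct ftrace_hash;				/* opaque in this sketch */

struct ftrace_ops_hash {
	struct ftrace_hash *notrace_hash;
	struct ftrace_hash *filter_hash;
	struct mutex regex_lock;
};

struct ftrace_ops {
	struct ftrace_ops_hash local_hash;	/* the hash this ops owns */
	struct ftrace_ops_hash *func_hash;	/* the hash this ops uses */
};

#define INIT_OPS_HASH(opsname) \
	.func_hash = &opsname.local_hash, \
	.local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),

static struct ftrace_ops demo_ops = {
	INIT_OPS_HASH(demo_ops)
};

int main(void)
{
	/* By default an ops uses the hash it owns */
	printf("%d\n", demo_ops.func_hash == &demo_ops.local_hash);
	return 0;
}

Built with a C99 compiler this prints 1: every reader resolves the hash through func_hash, so redirecting that one pointer is all ASSIGN_OPS_HASH() has to do to share hashes between two ops.
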
@@ -140,7 +146,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
- mutex_init(&ops->regex_lock);
+ mutex_init(&ops->local_hash.regex_lock);
+ ops->func_hash = &ops->local_hash;
 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
 }
 #endif
@@ -899,7 +906,7 @@ static void unregister_ftrace_profiler(void)
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 .func = function_profile_call,
 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
- INIT_REGEX_LOCK(ftrace_profile_ops)
+ INIT_OPS_HASH(ftrace_profile_ops)
 };

 static int register_ftrace_profiler(void)
@@ -1081,11 +1088,12 @@ static const struct ftrace_hash empty_hash = {
 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)

 static struct ftrace_ops global_ops = {
- .func = ftrace_stub,
- .notrace_hash = EMPTY_HASH,
- .filter_hash = EMPTY_HASH,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
- INIT_REGEX_LOCK(global_ops)
+ .func = ftrace_stub,
+ .local_hash.notrace_hash = EMPTY_HASH,
+ .local_hash.filter_hash = EMPTY_HASH,
+ INIT_OPS_HASH(global_ops)
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
+ FTRACE_OPS_FL_INITIALIZED,
 };

 struct ftrace_page {
@@ -1226,8 +1234,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
 ftrace_ops_init(ops);
- free_ftrace_hash(ops->filter_hash);
- free_ftrace_hash(ops->notrace_hash);
+ free_ftrace_hash(ops->func_hash->filter_hash);
+ free_ftrace_hash(ops->func_hash->notrace_hash);
 }

 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
@@ -1288,9 +1296,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 }

 static void
-ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
 static void
-ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
@@ -1342,13 +1350,13 @@ update:
 * Remove the current set, update the hash and add
 * them back.
 */
- ftrace_hash_rec_disable(ops, enable);
+ ftrace_hash_rec_disable_modify(ops, enable);

 old_hash = *dst;
 rcu_assign_pointer(*dst, new_hash);
 free_ftrace_hash_rcu(old_hash);

- ftrace_hash_rec_enable(ops, enable);
+ ftrace_hash_rec_enable_modify(ops, enable);

 return 0;
 }
@@ -1382,8 +1390,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 return 0;
 #endif

- filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
- notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
+ filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+ notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

 if ((ftrace_hash_empty(filter_hash) ||
 ftrace_lookup_ip(filter_hash, ip)) &&
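
For reference, the test that just changed implements ftrace's long-standing filter semantics, now read through ops->func_hash: an ip is traced when the filter hash is empty or contains it, and the notrace hash does not. The same logic in a self-contained sketch (flat arrays standing in for struct ftrace_hash, helper names made up for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool hash_empty(int nr_entries)
{
	return nr_entries == 0;
}

static bool hash_lookup(const unsigned long *hash, int nr_entries,
			unsigned long ip)
{
	for (int i = 0; i < nr_entries; i++)
		if (hash[i] == ip)
			return true;
	return false;
}

/* Mirrors the condition in ftrace_ops_test() above */
static bool ops_test(const unsigned long *filter, int nf,
		     const unsigned long *notrace, int nn, unsigned long ip)
{
	return (hash_empty(nf) || hash_lookup(filter, nf, ip)) &&
	       !hash_lookup(notrace, nn, ip);
}

int main(void)
{
	unsigned long filter[] = { 0x1000 }, notrace[] = { 0x2000 };

	printf("%d\n", ops_test(filter, 1, notrace, 1, 0x1000));	/* 1 */
	printf("%d\n", ops_test(NULL, 0, notrace, 1, 0x2000));		/* 0 */
	return 0;
}
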
@@ -1503,25 +1511,38 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
 static void ftrace_remove_tramp(struct ftrace_ops *ops,
 struct dyn_ftrace *rec)
 {
- struct ftrace_func_entry *entry;
-
- entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
- if (!entry)
+ /* If TRAMP is not set, no ops should have a trampoline for this */
+ if (!(rec->flags & FTRACE_FL_TRAMP))
 return;

+ rec->flags &= ~FTRACE_FL_TRAMP;
+
+ if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+ !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
+ ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
+ return;
 /*
 * The tramp_hash entry will be removed at time
 * of update.
 */
 ops->nr_trampolines--;
- rec->flags &= ~FTRACE_FL_TRAMP;
 }

-static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
 {
 struct ftrace_ops *op;

+ /* If TRAMP is not set, no ops should have a trampoline for this */
+ if (!(rec->flags & FTRACE_FL_TRAMP))
+ return;
+
 do_for_each_ftrace_op(op, ftrace_ops_list) {
+ /*
+ * This function is called to clear other tramps
+ * not the one that is being updated.
+ */
+ if (op == ops)
+ continue;
 if (op->nr_trampolines)
 ftrace_remove_tramp(op, rec);
 } while_for_each_ftrace_op(op);
@@ -1554,14 +1575,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 * gets inversed.
 */
 if (filter_hash) {
- hash = ops->filter_hash;
- other_hash = ops->notrace_hash;
+ hash = ops->func_hash->filter_hash;
+ other_hash = ops->func_hash->notrace_hash;
 if (ftrace_hash_empty(hash))
 all = 1;
 } else {
 inc = !inc;
- hash = ops->notrace_hash;
- other_hash = ops->filter_hash;
+ hash = ops->func_hash->notrace_hash;
+ other_hash = ops->func_hash->filter_hash;
 /*
 * If the notrace hash has no items,
 * then there's nothing to do.
@@ -1622,13 +1643,10 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 /*
 * If we are adding another function callback
 * to this function, and the previous had a
- * trampoline used, then we need to go back to
- * the default trampoline.
+ * custom trampoline in use, then we need to go
+ * back to the default trampoline.
 */
- rec->flags &= ~FTRACE_FL_TRAMP;
-
- /* remove trampolines from any ops for this rec */
- ftrace_clear_tramps(rec);
+ ftrace_clear_tramps(rec, ops);
 }

 /*
@@ -1682,6 +1700,41 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
 __ftrace_hash_rec_update(ops, filter_hash, 1);
 }

+static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
+ int filter_hash, int inc)
+{
+ struct ftrace_ops *op;
+
+ __ftrace_hash_rec_update(ops, filter_hash, inc);
+
+ if (ops->func_hash != &global_ops.local_hash)
+ return;
+
+ /*
+ * If the ops shares the global_ops hash, then we need to update
+ * all ops that are enabled and use this hash.
+ */
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ /* Already done */
+ if (op == ops)
+ continue;
+ if (op->func_hash == &global_ops.local_hash)
+ __ftrace_hash_rec_update(op, filter_hash, inc);
+ } while_for_each_ftrace_op(op);
+}
+
+static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
+ int filter_hash)
+{
+ ftrace_hash_rec_update_modify(ops, filter_hash, 0);
+}
+
+static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
+ int filter_hash)
+{
+ ftrace_hash_rec_update_modify(ops, filter_hash, 1);
+}
+
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
 int i;
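
The *_modify() wrappers added above exist because a hash is no longer private to one ops: when the hash being rewritten is global_ops' local_hash, every registered ops whose func_hash points at it must have its record counts updated as well, not just the ops the change came in on. A user-space sketch of that control flow (list handling simplified, names illustrative):

#include <stdio.h>

struct ops {
	const char *name;
	int *func_hash;		/* stands in for ops->func_hash */
	struct ops *next;
};

static int global_hash;		/* stands in for global_ops.local_hash */
static int private_hash;

static void hash_rec_update(struct ops *op, int inc)
{
	printf("update %s records by %+d\n", op->name, inc);
}

/* Mirrors ftrace_hash_rec_update_modify(): update the target ops,
 * then propagate to every other ops sharing the global hash. */
static void update_modify(struct ops *list, struct ops *ops, int inc)
{
	struct ops *op;

	hash_rec_update(ops, inc);

	if (ops->func_hash != &global_hash)
		return;

	for (op = list; op; op = op->next) {
		if (op == ops)
			continue;	/* already done */
		if (op->func_hash == &global_hash)
			hash_rec_update(op, inc);
	}
}

int main(void)
{
	struct ops graph  = { "graph_ops",   &global_hash,  NULL };
	struct ops tracer = { "func_tracer", &global_hash,  &graph };
	struct ops probe  = { "probe_ops",   &private_hash, &tracer };

	update_modify(&probe, &probe, 1);	/* private hash: probe only */
	update_modify(&probe, &tracer, -1);	/* shared hash: tracer and graph */
	return 0;
}
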
@@ -1896,8 +1949,8 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
 if (rec->flags & FTRACE_FL_TRAMP) {
 ops = ftrace_find_tramp_ops_new(rec);
 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
- pr_warning("Bad trampoline accounting at: %p (%pS)\n",
- (void *)rec->ip, (void *)rec->ip);
+ pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
+ (void *)rec->ip, (void *)rec->ip, rec->flags);
 /* Ftrace is shutting down, return anything */
 return (unsigned long)FTRACE_ADDR;
 }
@@ -1964,7 +2017,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 return ftrace_make_call(rec, ftrace_addr);

 case FTRACE_UPDATE_MAKE_NOP:
- return ftrace_make_nop(NULL, rec, ftrace_addr);
+ return ftrace_make_nop(NULL, rec, ftrace_old_addr);

 case FTRACE_UPDATE_MODIFY_CALL:
 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
@@ -2227,7 +2280,10 @@ static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
 } while_for_each_ftrace_rec();

 /* The number of recs in the hash must match nr_trampolines */
- FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+ if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
+ pr_warn("count=%ld trampolines=%d\n",
+ ops->tramp_hash->count,
+ ops->nr_trampolines);

 return 0;
 }
@@ -2436,8 +2492,8 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
 * Filter_hash being empty will default to trace module.
 * But notrace hash requires a test of individual module functions.
 */
- return ftrace_hash_empty(ops->filter_hash) &&
- ftrace_hash_empty(ops->notrace_hash);
+ return ftrace_hash_empty(ops->func_hash->filter_hash) &&
+ ftrace_hash_empty(ops->func_hash->notrace_hash);
 }

 /*
@@ -2459,12 +2515,12 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 return 0;

 /* The function must be in the filter */
- if (!ftrace_hash_empty(ops->filter_hash) &&
- !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+ if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+ !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
 return 0;

 /* If in notrace hash, we ignore it too */
- if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+ if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
 return 0;

 return 1;
@@ -2785,10 +2841,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 } else {
 rec = &iter->pg->records[iter->idx++];
 if (((iter->flags & FTRACE_ITER_FILTER) &&
- !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
+ !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||

 ((iter->flags & FTRACE_ITER_NOTRACE) &&
- !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+ !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||

 ((iter->flags & FTRACE_ITER_ENABLED) &&
 !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -2837,9 +2893,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 * functions are enabled.
 */
 if ((iter->flags & FTRACE_ITER_FILTER &&
- ftrace_hash_empty(ops->filter_hash)) ||
+ ftrace_hash_empty(ops->func_hash->filter_hash)) ||
 (iter->flags & FTRACE_ITER_NOTRACE &&
- ftrace_hash_empty(ops->notrace_hash))) {
+ ftrace_hash_empty(ops->func_hash->notrace_hash))) {
 if (*pos > 0)
 return t_hash_start(m, pos);
 iter->flags |= FTRACE_ITER_PRINTALL;
@@ -3001,12 +3057,12 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 iter->ops = ops;
 iter->flags = flag;

- mutex_lock(&ops->regex_lock);
+ mutex_lock(&ops->func_hash->regex_lock);

 if (flag & FTRACE_ITER_NOTRACE)
- hash = ops->notrace_hash;
+ hash = ops->func_hash->notrace_hash;
 else
- hash = ops->filter_hash;
+ hash = ops->func_hash->filter_hash;

 if (file->f_mode & FMODE_WRITE) {
 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
@@ -3041,7 +3097,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 file->private_data = iter;

 out_unlock:
- mutex_unlock(&ops->regex_lock);
+ mutex_unlock(&ops->func_hash->regex_lock);

 return ret;
 }
@@ -3279,7 +3335,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 {
 .func = function_trace_probe_call,
 .flags = FTRACE_OPS_FL_INITIALIZED,
- INIT_REGEX_LOCK(trace_probe_ops)
+ INIT_OPS_HASH(trace_probe_ops)
 };

 static int ftrace_probe_registered;
@@ -3342,7 +3398,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 void *data)
 {
 struct ftrace_func_probe *entry;
- struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+ struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
 struct ftrace_hash *hash;
 struct ftrace_page *pg;
 struct dyn_ftrace *rec;
@@ -3359,7 +3415,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 if (WARN_ON(not))
 return -EINVAL;

- mutex_lock(&trace_probe_ops.regex_lock);
+ mutex_lock(&trace_probe_ops.func_hash->regex_lock);

 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 if (!hash) {
@@ -3428,7 +3484,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 out_unlock:
 mutex_unlock(&ftrace_lock);
 out:
- mutex_unlock(&trace_probe_ops.regex_lock);
+ mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
 free_ftrace_hash(hash);

 return count;
@@ -3446,7 +3502,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 struct ftrace_func_entry *rec_entry;
 struct ftrace_func_probe *entry;
 struct ftrace_func_probe *p;
- struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+ struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
 struct list_head free_list;
 struct ftrace_hash *hash;
 struct hlist_node *tmp;
@@ -3468,7 +3524,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 return;
 }

- mutex_lock(&trace_probe_ops.regex_lock);
+ mutex_lock(&trace_probe_ops.func_hash->regex_lock);

 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 if (!hash)
@@ -3521,7 +3577,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 mutex_unlock(&ftrace_lock);

 out_unlock:
- mutex_unlock(&trace_probe_ops.regex_lock);
+ mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
 free_ftrace_hash(hash);
 }

@@ -3717,12 +3773,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 if (unlikely(ftrace_disabled))
 return -ENODEV;

- mutex_lock(&ops->regex_lock);
+ mutex_lock(&ops->func_hash->regex_lock);

 if (enable)
- orig_hash = &ops->filter_hash;
+ orig_hash = &ops->func_hash->filter_hash;
 else
- orig_hash = &ops->notrace_hash;
+ orig_hash = &ops->func_hash->notrace_hash;

 if (reset)
 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
@@ -3752,7 +3808,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 mutex_unlock(&ftrace_lock);

 out_regex_unlock:
- mutex_unlock(&ops->regex_lock);
+ mutex_unlock(&ops->func_hash->regex_lock);

 free_ftrace_hash(hash);
 return ret;
@@ -3975,15 +4031,15 @@ int ftrace_regex_release(struct inode *inode, struct file *file)

 trace_parser_put(parser);

- mutex_lock(&iter->ops->regex_lock);
+ mutex_lock(&iter->ops->func_hash->regex_lock);

 if (file->f_mode & FMODE_WRITE) {
 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

 if (filter_hash)
- orig_hash = &iter->ops->filter_hash;
+ orig_hash = &iter->ops->func_hash->filter_hash;
 else
- orig_hash = &iter->ops->notrace_hash;
+ orig_hash = &iter->ops->func_hash->notrace_hash;

 mutex_lock(&ftrace_lock);
 ret = ftrace_hash_move(iter->ops, filter_hash,
@@ -3994,7 +4050,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 mutex_unlock(&ftrace_lock);
 }

- mutex_unlock(&iter->ops->regex_lock);
+ mutex_unlock(&iter->ops->func_hash->regex_lock);
 free_ftrace_hash(iter->hash);
 kfree(iter);

@@ -4611,7 +4667,6 @@ void __init ftrace_init(void)
 static struct ftrace_ops global_ops = {
 .func = ftrace_stub,
 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
- INIT_REGEX_LOCK(global_ops)
 };

 static int __init ftrace_nodyn_init(void)
@@ -4713,7 +4768,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops control_ops = {
 .func = ftrace_ops_control_func,
 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
- INIT_REGEX_LOCK(control_ops)
+ INIT_OPS_HASH(control_ops)
 };

 static inline void
@@ -5145,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER

+static struct ftrace_ops graph_ops = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
+ FTRACE_OPS_FL_INITIALIZED |
+ FTRACE_OPS_FL_STUB,
+#ifdef FTRACE_GRAPH_TRAMP_ADDR
+ .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
+#endif
+ ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
+};
+
 static int ftrace_graph_active;

 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
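
The graph_ops just introduced replaces the global_ops juggling that the last two hunks of this patch delete: a dedicated ops for the function graph tracer that permanently carries the STUB flag and the graph trampoline, but borrows global_ops' hash so that set_ftrace_filter keeps steering both tracers. Expanding ASSIGN_OPS_HASH() per its definition at the top of this patch, the initializer comes out as:

/* ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) expands to: */
.func_hash = &global_ops.local_hash,
.local_hash.regex_lock = __MUTEX_INITIALIZER(graph_ops.local_hash.regex_lock),

so graph_ops' own local regex_lock is still initialized, even though lookups and locking go through the shared hash that func_hash points at.
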
@@ -5307,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
 */
 static void update_function_graph_func(void)
 {
- if (ftrace_ops_list == &ftrace_list_end ||
- (ftrace_ops_list == &global_ops &&
- global_ops.next == &ftrace_list_end))
- ftrace_graph_entry = __ftrace_graph_entry;
- else
+ struct ftrace_ops *op;
+ bool do_test = false;
+
+ /*
+ * The graph and global ops share the same set of functions
+ * to test. If any other ops is on the list, then
+ * the graph tracing needs to test if it's the function
+ * it should call.
+ */
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (op != &global_ops && op != &graph_ops &&
+ op != &ftrace_list_end) {
+ do_test = true;
+ /* in double loop, break out with goto */
+ goto out;
+ }
+ } while_for_each_ftrace_op(op);
+ out:
+ if (do_test)
 ftrace_graph_entry = ftrace_graph_entry_test;
+ else
+ ftrace_graph_entry = __ftrace_graph_entry;
 }

 static struct notifier_block ftrace_suspend_notifier = {
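
With graph_ops now a real entry on ftrace_ops_list, the old pointer-equality check no longer suffices, so update_function_graph_func() walks the list: only an ops other than global_ops, graph_ops, and the ftrace_list_end terminator forces the per-function entry test. A condensed user-space rendering of that decision (list and names simplified):

#include <stdbool.h>
#include <stdio.h>

struct ops { struct ops *next; };

static struct ops list_end;			/* ftrace_list_end stand-in */
static struct ops global_ops_m = { &list_end };
static struct ops graph_ops_m  = { &global_ops_m };

/* true when some third-party ops is registered, i.e. graph tracing
 * must test each function before calling its entry handler */
static bool need_entry_test(struct ops *list)
{
	for (struct ops *op = list; op != &list_end; op = op->next)
		if (op != &global_ops_m && op != &graph_ops_m)
			return true;
	return false;
}

int main(void)
{
	struct ops third = { &graph_ops_m };	/* e.g. a registered probe */

	printf("%d\n", need_entry_test(&graph_ops_m));	/* 0: no test needed */
	printf("%d\n", need_entry_test(&third));	/* 1: test required */
	return 0;
}
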
@@ -5353,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 ftrace_graph_entry = ftrace_graph_entry_test;
 update_function_graph_func();

- /* Function graph doesn't use the .func field of global_ops */
- global_ops.flags |= FTRACE_OPS_FL_STUB;
-
-#ifdef CONFIG_DYNAMIC_FTRACE
- /* Optimize function graph calling (if implemented by arch) */
- if (FTRACE_GRAPH_TRAMP_ADDR != 0)
- global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
-#endif
-
- ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+ ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);

 out:
 mutex_unlock(&ftrace_lock);
@@ -5380,12 +5453,7 @@ void unregister_ftrace_graph(void)
 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 ftrace_graph_entry = ftrace_graph_entry_stub;
 __ftrace_graph_entry = ftrace_graph_entry_stub;
- ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
- global_ops.flags &= ~FTRACE_OPS_FL_STUB;
-#ifdef CONFIG_DYNAMIC_FTRACE
- if (FTRACE_GRAPH_TRAMP_ADDR != 0)
- global_ops.trampoline = 0;
-#endif
+ ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
 unregister_pm_notifier(&ftrace_suspend_notifier);
 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);