@@ -214,12 +214,10 @@ __setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
-static char *trace_boot_options __initdata;
static int __init set_trace_boot_options(char *str)
{
strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
- trace_boot_options = trace_boot_options_buf;
return 0;
}
__setup("trace_options=", set_trace_boot_options);
@@ -250,6 +248,19 @@ unsigned long long ns2usecs(cycle_t nsec)
return nsec;
}
+/* trace_flags holds trace_options default values */
+#define TRACE_DEFAULT_FLAGS \
+ (FUNCTION_DEFAULT_FLAGS | \
+ TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
+ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
+ TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
+ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
+
+/* trace_options that are only supported by global_trace */
+#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
+ TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
+
+
/*
* The global_trace is the descriptor that holds the tracing
* buffers for the live tracing. For each CPU, it contains
@@ -262,7 +273,9 @@ unsigned long long ns2usecs(cycle_t nsec)
* pages for the buffer for that CPU. Each CPU has the same number
* of pages allocated for its buffer.
*/
-static struct trace_array global_trace;
+static struct trace_array global_trace = {
+ .trace_flags = TRACE_DEFAULT_FLAGS,
+};
LIST_HEAD(ftrace_trace_arrays);
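With this change every option bit lives in the trace_array descriptor, and global_trace simply starts out with TRACE_DEFAULT_FLAGS; most of the hunks below are the same mechanical substitution. A minimal sketch of the before/after pattern (do_work() is a placeholder, not a function in this file):

	/* before: one global bitmask shared by every user */
	if (trace_flags & TRACE_ITER_PRINTK)
		do_work();

	/* after: the flags of whichever instance is being operated on */
	if (tr->trace_flags & TRACE_ITER_PRINTK)
		do_work();
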
@@ -468,11 +481,29 @@ static inline void trace_access_lock_init(void)
#endif
-/* trace_flags holds trace_options default values */
-unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
- TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
- TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
- TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
+#ifdef CONFIG_STACKTRACE
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs);
+static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs);
+
+#else
+static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
+{
+}
+static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
+{
+}
+
+#endif
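Declaring the real functions under CONFIG_STACKTRACE and supplying empty inline stubs otherwise keeps the call sites free of #ifdefs: the calls simply compile away when stack tracing is not built in. A hypothetical caller, sketched for illustration only:

	static void example_commit(struct trace_array *tr,
				   struct ring_buffer *buffer,
				   unsigned long flags, int pc)
	{
		/* With CONFIG_STACKTRACE=n this inlines to nothing. */
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}
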
static void tracer_tracing_on(struct trace_array *tr)
{
@@ -518,7 +549,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
int alloc;
int pc;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
pc = preempt_count();
@@ -548,7 +579,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
entry->buf[size] = '\0';
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, irq_flags, 4, pc);
+ ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
return size;
}
@@ -568,7 +599,7 @@ int __trace_bputs(unsigned long ip, const char *str)
int size = sizeof(struct bputs_entry);
int pc;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
pc = preempt_count();
@@ -588,7 +619,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry->str = str;
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, irq_flags, 4, pc);
+ ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
return 1;
}
@@ -834,34 +865,18 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
return nsecs / 1000;
}
+/*
+ * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
+ * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
+ * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
+ * of strings in the order that the enums were defined.
+ */
+#undef C
+#define C(a, b) b
+
/* These must match the bit postions in trace_iterator_flags */
static const char *trace_options[] = {
- "print-parent",
- "sym-offset",
- "sym-addr",
- "verbose",
- "raw",
- "hex",
- "bin",
- "block",
- "stacktrace",
- "trace_printk",
- "ftrace_preempt",
- "branch",
- "annotate",
- "userstacktrace",
- "sym-userobj",
- "printk-msg-only",
- "context-info",
- "latency-format",
- "sleep-time",
- "graph-time",
- "record-cmd",
- "overwrite",
- "disable_on_free",
- "irq-info",
- "markers",
- "function-trace",
+ TRACE_FLAGS
NULL
};
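TRACE_FLAGS (defined alongside the TRACE_ITER_* bits, presumably in the companion trace.h change) is an X-macro list, so the same table that produces the bit masks also produces the strings above and the two cannot drift apart. A reduced, self-contained sketch of the idiom with made-up flag names:

	#include <stddef.h>

	#define MY_FLAGS				\
		C(PRINT_PARENT, "print-parent"),	\
		C(SYM_OFFSET,	"sym-offset"),		\
		C(VERBOSE,	"verbose"),

	/* First expansion: bit numbers, then the masks built from them. */
	#undef C
	#define C(a, b) MY_ITER_##a##_BIT
	enum { MY_FLAGS MY_ITER_LAST_BIT };

	#undef C
	#define C(a, b) MY_ITER_##a = (1 << MY_ITER_##a##_BIT)
	enum { MY_FLAGS };

	/* Second expansion: the matching string table, in the same order. */
	#undef C
	#define C(a, b) b
	static const char *my_options[] = { MY_FLAGS NULL };
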
@@ -1204,13 +1219,17 @@ static inline int run_tracer_selftest(struct tracer *type)
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
+static void add_tracer_options(struct trace_array *tr, struct tracer *t);
+
+static void __init apply_trace_boot_options(void);
+
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
*
* Register a new plugin tracer.
*/
-int register_tracer(struct tracer *type)
+int __init register_tracer(struct tracer *type)
{
struct tracer *t;
int ret = 0;
@@ -1253,6 +1272,7 @@ int register_tracer(struct tracer *type)
type->next = trace_types;
trace_types = type;
+ add_tracer_options(&global_trace, type);
out:
tracing_selftest_running = false;
@@ -1268,6 +1288,9 @@ int register_tracer(struct tracer *type)
/* Do we want this tracer to start on bootup? */
tracing_set_tracer(&global_trace, type->name);
default_bootup_tracer = NULL;
+
+ apply_trace_boot_options();
+
/* disable other selftests, since this will break it. */
tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -1671,23 +1694,16 @@ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *eve
ring_buffer_unlock_commit(buffer, event);
}
-static inline void
-__trace_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
+void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc)
{
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, flags, 6, pc);
+ ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
ftrace_trace_userstack(buffer, flags, pc);
}
-
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
-{
- __trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
static struct ring_buffer *temp_buffer;
@@ -1729,22 +1745,15 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
-void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
-{
- __trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
-EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
-
-void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+ struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs)
{
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
+ ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
@@ -1873,24 +1882,17 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
}
-void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
{
- if (!(trace_flags & TRACE_ITER_STACKTRACE))
+ if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
return;
__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
-void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
- int skip, int pc)
-{
- if (!(trace_flags & TRACE_ITER_STACKTRACE))
- return;
-
- __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
-}
-
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc)
{
@@ -1929,7 +1931,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
struct userstack_entry *entry;
struct stack_trace trace;
- if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
+ if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
/*
@@ -2173,7 +2175,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, flags, 6, pc);
+ ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
}
out:
@@ -2225,7 +2227,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, flags, 6, pc);
+ ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
}
out:
preempt_enable_notrace();
@@ -2246,7 +2248,7 @@ int trace_array_printk(struct trace_array *tr,
int ret;
va_list ap;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
va_start(ap, fmt);
@@ -2261,7 +2263,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
int ret;
va_list ap;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
va_start(ap, fmt);
@@ -2602,7 +2604,7 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
- unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+ unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
struct trace_buffer *buf = iter->trace_buffer;
struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
struct tracer *type = iter->trace;
@@ -2664,20 +2666,22 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
static void test_cpu_buff_start(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
+ struct trace_array *tr = iter->tr;
- if (!(trace_flags & TRACE_ITER_ANNOTATE))
+ if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
return;
if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
return;
- if (cpumask_test_cpu(iter->cpu, iter->started))
+ if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
return;
if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
return;
- cpumask_set_cpu(iter->cpu, iter->started);
+ if (iter->started)
+ cpumask_set_cpu(iter->cpu, iter->started);
/* Don't print started cpu buffer for the first entry of the trace */
if (iter->idx > 1)
@@ -2687,8 +2691,9 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
- unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+ unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
struct trace_entry *entry;
struct trace_event *event;
@@ -2698,7 +2703,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
event = ftrace_find_event(entry->type);
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
trace_print_lat_context(iter);
else
@@ -2718,13 +2723,14 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
- if (trace_flags & TRACE_ITER_CONTEXT_INFO)
+ if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
trace_seq_printf(s, "%d %d %llu ",
entry->pid, iter->cpu, iter->ts);
@@ -2742,6 +2748,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
unsigned char newline = '\n';
struct trace_entry *entry;
@@ -2749,7 +2756,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
entry = iter->ent;
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
SEQ_PUT_HEX_FIELD(s, entry->pid);
SEQ_PUT_HEX_FIELD(s, iter->cpu);
SEQ_PUT_HEX_FIELD(s, iter->ts);
@@ -2771,13 +2778,14 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
SEQ_PUT_FIELD(s, entry->pid);
SEQ_PUT_FIELD(s, iter->cpu);
SEQ_PUT_FIELD(s, iter->ts);
@@ -2826,6 +2834,8 @@ int trace_empty(struct trace_iterator *iter)
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
+ unsigned long trace_flags = tr->trace_flags;
enum print_line_t ret;
if (iter->lost_events) {
@@ -2871,6 +2881,7 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
void trace_latency_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
+ struct trace_array *tr = iter->tr;
/* print nothing if the buffers are empty */
if (trace_empty(iter))
@@ -2879,13 +2890,15 @@ void trace_latency_header(struct seq_file *m)
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
print_trace_header(m, iter);
- if (!(trace_flags & TRACE_ITER_VERBOSE))
+ if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
print_lat_help_header(m);
}
void trace_default_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
+ struct trace_array *tr = iter->tr;
+ unsigned long trace_flags = tr->trace_flags;
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
return;
@@ -3230,7 +3243,7 @@ static int tracing_open(struct inode *inode, struct file *file)
iter = __tracing_open(inode, file, false);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
- else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
}
@@ -3477,7 +3490,7 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
trace_opts = tr->current_trace->flags->opts;
for (i = 0; trace_options[i]; i++) {
- if (trace_flags & (1 << i))
+ if (tr->trace_flags & (1 << i))
seq_printf(m, "%s\n", trace_options[i]);
else
seq_printf(m, "no%s\n", trace_options[i]);
@@ -3542,7 +3555,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
/* do nothing if flag is already set */
- if (!!(trace_flags & mask) == !!enabled)
+ if (!!(tr->trace_flags & mask) == !!enabled)
return 0;
/* Give the tracer a chance to approve the change */
@@ -3551,9 +3564,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
return -EINVAL;
if (enabled)
- trace_flags |= mask;
+ tr->trace_flags |= mask;
else
- trace_flags &= ~mask;
+ tr->trace_flags &= ~mask;
if (mask == TRACE_ITER_RECORD_CMD)
trace_event_enable_cmd_record(enabled);
@@ -3565,8 +3578,10 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
#endif
}
- if (mask == TRACE_ITER_PRINTK)
+ if (mask == TRACE_ITER_PRINTK) {
trace_printk_start_stop_comm(enabled);
+ trace_printk_control(enabled);
+ }
return 0;
}
@@ -3577,6 +3592,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
int neg = 0;
int ret = -ENODEV;
int i;
+ size_t orig_len = strlen(option);
cmp = strstrip(option);
@@ -3600,9 +3616,36 @@ static int trace_set_options(struct trace_array *tr, char *option)
mutex_unlock(&trace_types_lock);
+ /*
+ * If the first trailing whitespace is replaced with '\0' by strstrip,
+ * turn it back into a space.
+ */
+ if (orig_len > strlen(option))
+ option[strlen(option)] = ' ';
+
return ret;
}
+static void __init apply_trace_boot_options(void)
+{
+ char *buf = trace_boot_options_buf;
+ char *option;
+
+ while (true) {
+ option = strsep(&buf, ",");
+
+ if (!option)
+ break;
+
+ if (*option)
+ trace_set_options(&global_trace, option);
+
+ /* Put back the comma to allow this to be called again */
+ if (buf)
+ *(buf - 1) = ',';
+ }
+}
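strsep() writes a NUL over each ',' it consumes, so a second pass over trace_boot_options_buf would otherwise stop after the first token; restoring the comma keeps the routine re-runnable, which matters because it is now called both from tracer_alloc_buffers() and again from register_tracer() once a boot-selected tracer shows up (trace_set_options() above restores the character strstrip() removed for the same reason). A user-space sketch of the loop, with a made-up option string:

	#include <stdio.h>
	#include <string.h>

	static char boot_buf[] = "noprint-parent,sym-addr,stacktrace";

	static void apply_options(void)
	{
		char *buf = boot_buf;
		char *option;

		while ((option = strsep(&buf, ",")) != NULL) {
			if (*option)
				printf("applying: %s\n", option);
			/* undo the '\0' strsep wrote so the buffer survives */
			if (buf)
				*(buf - 1) = ',';
		}
	}

	int main(void)
	{
		apply_options();
		apply_options();	/* second pass still sees all three options */
		return 0;
	}
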
+
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -4297,11 +4340,8 @@ int tracing_update_buffers(void)
struct trace_option_dentry;
-static struct trace_option_dentry *
-create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
-
static void
-destroy_trace_option_files(struct trace_option_dentry *topts);
+create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
/*
* Used to clear out the tracer before deletion of an instance.
@@ -4320,20 +4360,13 @@ static void tracing_set_nop(struct trace_array *tr)
tr->current_trace = &nop_trace;
}
-static void update_tracer_options(struct trace_array *tr, struct tracer *t)
+static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
- static struct trace_option_dentry *topts;
-
/* Only enable if the directory has been created already. */
if (!tr->dir)
return;
- /* Currently, only the top instance has options */
- if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
- return;
-
- destroy_trace_option_files(topts);
- topts = create_trace_option_files(tr, t);
+ create_trace_option_files(tr, t);
}
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
@@ -4402,7 +4435,6 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
free_snapshot(tr);
}
#endif
- update_tracer_options(tr, t);
#ifdef CONFIG_TRACER_MAX_TRACE
if (t->use_max_tr && !had_max_tr) {
@@ -4569,7 +4601,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
/* trace pipe does not show start of buffer */
cpumask_setall(iter->started);
- if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -4626,11 +4658,13 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
+ struct trace_array *tr = iter->tr;
+
/* Iterators are static, they should be filled or empty */
if (trace_buffer_iter(iter, iter->cpu_file))
return POLLIN | POLLRDNORM;
- if (trace_flags & TRACE_ITER_BLOCK)
+ if (tr->trace_flags & TRACE_ITER_BLOCK)
/*
* Always select as readable when in blocking mode
*/
@@ -5047,7 +5081,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
struct trace_array *tr = inode->i_private;
/* disable tracing ? */
- if (trace_flags & TRACE_ITER_STOP_ON_FREE)
+ if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
tracer_tracing_off(tr);
/* resize the ring buffer to 0 */
tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
@@ -5080,7 +5114,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (tracing_disabled)
return -EINVAL;
- if (!(trace_flags & TRACE_ITER_MARKERS))
+ if (!(tr->trace_flags & TRACE_ITER_MARKERS))
return -EINVAL;
if (cnt > TRACE_BUF_SIZE)
@@ -6132,13 +6166,6 @@ tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
#include "trace_selftest.c"
#endif
-struct trace_option_dentry {
- struct tracer_opt *opt;
- struct tracer_flags *flags;
- struct trace_array *tr;
- struct dentry *entry;
-};
-
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
@@ -6191,14 +6218,51 @@ static const struct file_operations trace_options_fops = {
.llseek = generic_file_llseek,
};
+/*
+ * In order to pass in both the trace_array descriptor as well as the index
+ * to the flag that the trace option file represents, the trace_array
+ * has a character array of trace_flags_index[], which holds the index
+ * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
+ * The address of this character array is passed to the flag option file
+ * read/write callbacks.
+ *
+ * In order to extract both the index and the trace_array descriptor,
+ * get_tr_index() uses the following algorithm.
+ *
+ * idx = *ptr;
+ *
+ * As the pointer itself contains the address of the index (remember
+ * index[1] == 1).
+ *
+ * Then to get the trace_array descriptor, by subtracting that index
+ * from the ptr, we get to the start of the index itself.
+ *
+ * ptr - idx == &index[0]
+ *
+ * Then a simple container_of() from that pointer gets us to the
+ * trace_array descriptor.
+ */
+static void get_tr_index(void *data, struct trace_array **ptr,
+ unsigned int *pindex)
+{
+ *pindex = *(unsigned char *)data;
+
+ *ptr = container_of(data - *pindex, struct trace_array,
+ trace_flags_index);
+}
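Each byte of trace_flags_index[] holds its own index, so the pointer handed to an option file is enough to recover both the bit number (by dereferencing it) and the owning trace_array (by stepping back that many bytes to index[0] and applying container_of()). A stand-alone sketch of the trick, using a made-up structure and a simplified container_of():

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct demo_array {
		unsigned long	flags;
		unsigned char	index[32];	/* index[i] == i */
	};

	static void get_owner(void *data, struct demo_array **owner, unsigned int *bit)
	{
		*bit = *(unsigned char *)data;	/* the element's value is its position */
		*owner = container_of((unsigned char *)data - *bit,
				      struct demo_array, index);
	}

	int main(void)
	{
		struct demo_array d = { .flags = 1UL << 5 };
		struct demo_array *owner;
		unsigned int bit;
		int i;

		for (i = 0; i < 32; i++)
			d.index[i] = i;

		/* &d.index[5] is what the option file's private_data would carry */
		get_owner(&d.index[5], &owner, &bit);
		printf("bit=%u set=%d owner_ok=%d\n",
		       bit, !!(owner->flags & (1UL << bit)), owner == &d);
		return 0;
	}
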
+
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- long index = (long)filp->private_data;
+ void *tr_index = filp->private_data;
+ struct trace_array *tr;
+ unsigned int index;
char *buf;
- if (trace_flags & (1 << index))
+ get_tr_index(tr_index, &tr, &index);
+
+ if (tr->trace_flags & (1 << index))
buf = "1\n";
else
buf = "0\n";
@@ -6210,11 +6274,14 @@ static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct trace_array *tr = &global_trace;
- long index = (long)filp->private_data;
+ void *tr_index = filp->private_data;
+ struct trace_array *tr;
+ unsigned int index;
unsigned long val;
int ret;
+ get_tr_index(tr_index, &tr, &index);
+
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
@@ -6298,21 +6365,39 @@ create_trace_option_file(struct trace_array *tr,
}
-static struct trace_option_dentry *
+static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
struct trace_option_dentry *topts;
+ struct trace_options *tr_topts;
struct tracer_flags *flags;
struct tracer_opt *opts;
int cnt;
+ int i;
if (!tracer)
- return NULL;
+ return;
flags = tracer->flags;
if (!flags || !flags->opts)
- return NULL;
+ return;
+
+ /*
+ * If this is an instance, only create flags for tracers
+ * the instance may have.
+ */
+ if (!trace_ok_for_array(tracer, tr))
+ return;
+
+ for (i = 0; i < tr->nr_topts; i++) {
+ /*
+ * Check if these flags have already been added.
+ * Some tracers share flags.
+ */
+ if (tr->topts[i].tracer->flags == tracer->flags)
+ return;
+ }
opts = flags->opts;
@@ -6321,27 +6406,27 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
if (!topts)
- return NULL;
-
- for (cnt = 0; opts[cnt].name; cnt++)
- create_trace_option_file(tr, &topts[cnt], flags,
- &opts[cnt]);
-
- return topts;
-}
-
-static void
-destroy_trace_option_files(struct trace_option_dentry *topts)
-{
- int cnt;
+ return;
- if (!topts)
+ tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
+ GFP_KERNEL);
+ if (!tr_topts) {
+ kfree(topts);
return;
+ }
- for (cnt = 0; topts[cnt].opt; cnt++)
- tracefs_remove(topts[cnt].entry);
+ tr->topts = tr_topts;
+ tr->topts[tr->nr_topts].tracer = tracer;
+ tr->topts[tr->nr_topts].topts = topts;
+ tr->nr_topts++;
- kfree(topts);
+ for (cnt = 0; opts[cnt].name; cnt++) {
+ create_trace_option_file(tr, &topts[cnt], flags,
+ &opts[cnt]);
+ WARN_ONCE(topts[cnt].entry == NULL,
+ "Failed to create trace option: %s",
+ opts[cnt].name);
+ }
}
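Instead of the old file-scope topts pointer, each (tracer, option files) pair is appended to tr->topts, which is what lets instance_rmdir() later in this patch free the per-instance files; committing the krealloc() result only on success keeps the existing array intact when the allocation fails. The same grow-by-one bookkeeping in isolation, with plain realloc() as a stand-in for krealloc() and a hypothetical element type:

	#include <stdlib.h>

	struct entry {
		const char	*name;
		void		*files;
	};

	struct table {
		struct entry	*items;
		int		nr;
	};

	static int table_add(struct table *t, const char *name, void *files)
	{
		struct entry *tmp;

		tmp = realloc(t->items, sizeof(*t->items) * (t->nr + 1));
		if (!tmp)
			return -1;	/* t->items is still valid and unchanged */

		t->items = tmp;
		t->items[t->nr].name = name;
		t->items[t->nr].files = files;
		t->nr++;
		return 0;
	}
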
static struct dentry *
@@ -6354,21 +6439,26 @@ create_trace_option_core_file(struct trace_array *tr,
if (!t_options)
return NULL;
- return trace_create_file(option, 0644, t_options, (void *)index,
- &trace_options_core_fops);
+ return trace_create_file(option, 0644, t_options,
+ (void *)&tr->trace_flags_index[index],
+ &trace_options_core_fops);
}
-static __init void create_trace_options_dir(struct trace_array *tr)
+static void create_trace_options_dir(struct trace_array *tr)
{
struct dentry *t_options;
+ bool top_level = tr == &global_trace;
int i;
t_options = trace_options_init_dentry(tr);
if (!t_options)
return;
- for (i = 0; trace_options[i]; i++)
- create_trace_option_core_file(tr, trace_options[i], i);
+ for (i = 0; trace_options[i]; i++) {
+ if (top_level ||
+ !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
+ create_trace_option_core_file(tr, trace_options[i], i);
+ }
}
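For sub-instances the loop now skips every option whose bit is in TOP_LEVEL_TRACE_FLAGS, so files such as trace_printk and record-cmd are only created at the top level; the filter works because bit i of the flag masks corresponds to trace_options[i]. The same test in a user-space sketch with made-up masks:

	#include <stdio.h>

	#define OPT_PRINTK	(1 << 0)
	#define OPT_MARKERS	(1 << 1)
	#define OPT_RECORD_CMD	(1 << 2)

	#define TOP_LEVEL_ONLY	(OPT_PRINTK | OPT_RECORD_CMD)

	/* index i must line up with bit i, as in the real trace_options[] */
	static const char *options[] = { "trace_printk", "markers", "record-cmd", NULL };

	static void create_files(int top_level)
	{
		int i;

		for (i = 0; options[i]; i++) {
			if (top_level || !((1 << i) & TOP_LEVEL_ONLY))
				printf("create %s\n", options[i]);
		}
	}

	int main(void)
	{
		create_files(1);	/* top level: all three files */
		create_files(0);	/* instance: only "markers" */
		return 0;
	}
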
static ssize_t
@@ -6435,7 +6525,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
{
enum ring_buffer_flags rb_flags;
- rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
+ rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
buf->tr = tr;
@@ -6505,6 +6595,30 @@ static void free_trace_buffers(struct trace_array *tr)
#endif
}
+static void init_trace_flags_index(struct trace_array *tr)
+{
+ int i;
+
+ /* Used by the trace options files */
+ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
+ tr->trace_flags_index[i] = i;
+}
+
+static void __update_tracer_options(struct trace_array *tr)
+{
+ struct tracer *t;
+
+ for (t = trace_types; t; t = t->next)
+ add_tracer_options(tr, t);
+}
+
+static void update_tracer_options(struct trace_array *tr)
+{
+ mutex_lock(&trace_types_lock);
+ __update_tracer_options(tr);
+ mutex_unlock(&trace_types_lock);
+}
+
static int instance_mkdir(const char *name)
{
struct trace_array *tr;
@@ -6530,6 +6644,8 @@ static int instance_mkdir(const char *name)
if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
goto out_free_tr;
+ tr->trace_flags = global_trace.trace_flags;
+
cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
raw_spin_lock_init(&tr->start_lock);
@@ -6555,6 +6671,8 @@ static int instance_mkdir(const char *name)
}
init_tracer_tracefs(tr, tr->dir);
+ init_trace_flags_index(tr);
+ __update_tracer_options(tr);
list_add(&tr->list, &ftrace_trace_arrays);
@@ -6580,6 +6698,7 @@ static int instance_rmdir(const char *name)
struct trace_array *tr;
int found = 0;
int ret;
+ int i;
mutex_lock(&trace_types_lock);
@@ -6602,9 +6721,14 @@ static int instance_rmdir(const char *name)
tracing_set_nop(tr);
event_trace_del_tracer(tr);
ftrace_destroy_function_files(tr);
- debugfs_remove_recursive(tr->dir);
+ tracefs_remove_recursive(tr->dir);
free_trace_buffers(tr);
+ for (i = 0; i < tr->nr_topts; i++) {
+ kfree(tr->topts[i].topts);
+ }
+ kfree(tr->topts);
+
kfree(tr->name);
kfree(tr);
@@ -6666,6 +6790,8 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
trace_create_file("tracing_on", 0644, d_tracer,
tr, &rb_simple_fops);
+ create_trace_options_dir(tr);
+
#ifdef CONFIG_TRACER_MAX_TRACE
trace_create_file("tracing_max_latency", 0644, d_tracer,
&tr->max_latency, &tracing_max_lat_fops);
@@ -6861,11 +6987,7 @@ static __init int tracer_init_tracefs(void)
create_trace_instances(d_tracer);
- create_trace_options_dir(&global_trace);
-
- /* If the tracer was started via cmdline, create options for it here */
- if (global_trace.current_trace != &nop_trace)
- update_tracer_options(&global_trace, global_trace.current_trace);
+ update_tracer_options(&global_trace);
return 0;
}
@@ -6964,6 +7086,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
static atomic_t dump_running;
+ struct trace_array *tr = &global_trace;
unsigned int old_userobj;
unsigned long flags;
int cnt = 0, cpu;
@@ -6993,10 +7116,10 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
- old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+ old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
/* don't look at user memory in panic mode */
- trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+ tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
switch (oops_dump_mode) {
case DUMP_ALL:
@@ -7059,7 +7182,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
printk(KERN_TRACE "---------------------------------\n");
out_enable:
- trace_flags |= old_userobj;
+ tr->trace_flags |= old_userobj;
for_each_tracing_cpu(cpu) {
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
@@ -7074,6 +7197,12 @@ __init static int tracer_alloc_buffers(void)
int ring_buf_size;
int ret = -ENOMEM;
+ /*
+ * Make sure we don't accidently add more trace options
+ * than we have bits for.
+ */
+ BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
+
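Since trace_flags_index[] has TRACE_FLAGS_MAX_SIZE entries, the number of bits generated from TRACE_FLAGS must never outgrow it; BUILD_BUG_ON() (from <linux/bug.h>) turns that into a compile-time failure instead of a silent overflow. A rough user-space equivalent of the guard:

	/* roughly what BUILD_BUG_ON() does: a negative array size when cond is true */
	#define STATIC_ASSERT(cond, name)	typedef char name[(cond) ? -1 : 1]

	#define FLAGS_MAX_SIZE	32
	enum { FLAG_A_BIT, FLAG_B_BIT, FLAG_LAST_BIT };

	/* breaks the build as soon as more flag bits exist than the index array holds */
	STATIC_ASSERT(FLAG_LAST_BIT > FLAGS_MAX_SIZE, too_many_flag_bits);
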
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
goto out;
@@ -7132,6 +7261,8 @@ __init static int tracer_alloc_buffers(void)
ftrace_init_global_array_ops(&global_trace);
+ init_trace_flags_index(&global_trace);
+
register_tracer(&nop_trace);
/* All seems OK, enable tracing */
@@ -7148,12 +7279,7 @@ __init static int tracer_alloc_buffers(void)
INIT_LIST_HEAD(&global_trace.events);
list_add(&global_trace.list, &ftrace_trace_arrays);
- while (trace_boot_options) {
- char *option;
-
- option = strsep(&trace_boot_options, ",");
- trace_set_options(&global_trace, option);
- }
+ apply_trace_boot_options();
register_snapshot_cmd();