@@ -1,7 +1,7 @@
 /*
  * ring buffer based function tracer
  *
- * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  *
  * Originally taken from the RT patch by:
@@ -251,9 +251,6 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 /* trace_types holds a link list of available tracers. */
 static struct tracer *trace_types __read_mostly;
 
-/* current_trace points to the tracer that is currently active */
-static struct tracer *current_trace __read_mostly = &nop_trace;
-
 /*
  * trace_types_lock is used to protect the trace_types list.
  */
@@ -350,9 +347,6 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
 	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;
 
-static int trace_stop_count;
-static DEFINE_RAW_SPINLOCK(tracing_start_lock);
-
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
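Note: the hunks above and below assume that the old file-scope tracer state
(current_trace, trace_stop_count, tracing_start_lock, trace_clock_id, and the
static debugfs dentries) now lives in struct trace_array. A minimal sketch of
the fields this diff relies on -- the real structure is declared in
kernel/trace/trace.h, which is not part of this patch, so the layout here is
inferred from usage alone:

	struct trace_array {
		struct ring_buffer	*buffer;	/* per-array ring buffer */
		struct tracer		*current_trace;	/* was the global current_trace */
		int			stop_count;	/* was trace_stop_count */
		raw_spinlock_t		start_lock;	/* was tracing_start_lock */
		int			clock_id;	/* was trace_clock_id */
		unsigned int		flags;		/* e.g. TRACE_ARRAY_FL_GLOBAL */
		struct dentry		*dir;		/* was the static d_tracer */
		struct dentry		*percpu_dir;	/* was the static d_percpu */
		struct dentry		*options;	/* was the static t_options */
		struct trace_cpu	trace_cpu;	/* debugfs i_private cookie, see below */
	};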
@@ -708,14 +702,14 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct ring_buffer *buf;
 
-	if (trace_stop_count)
+	if (tr->stop_count)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (!current_trace->allocated_snapshot) {
+	if (!tr->current_trace->allocated_snapshot) {
 		/* Only the nop tracer should hit this when disabling */
-		WARN_ON_ONCE(current_trace != &nop_trace);
+		WARN_ON_ONCE(tr->current_trace != &nop_trace);
 		return;
 	}
 
@@ -742,11 +736,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	int ret;
 
-	if (trace_stop_count)
+	if (tr->stop_count)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
+	if (WARN_ON_ONCE(!tr->current_trace->allocated_snapshot))
 		return;
 
 	arch_spin_lock(&ftrace_max_lock);
@@ -853,8 +847,8 @@ int register_tracer(struct tracer *type)
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	if (type->selftest && !tracing_selftest_disabled) {
-		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
+		struct tracer *saved_tracer = tr->current_trace;
 
 		/*
 		 * Run a selftest on this tracer.
@@ -865,7 +859,7 @@ int register_tracer(struct tracer *type)
 		 */
 		tracing_reset_online_cpus(tr);
 
-		current_trace = type;
+		tr->current_trace = type;
 
 		if (type->use_max_tr) {
 			/* If we expanded the buffers, make sure the max is expanded too */
@@ -879,7 +873,7 @@ int register_tracer(struct tracer *type)
 		pr_info("Testing tracer %s: ", type->name);
 		ret = type->selftest(type, tr);
 		/* the test is responsible for resetting too */
-		current_trace = saved_tracer;
+		tr->current_trace = saved_tracer;
 		if (ret) {
 			printk(KERN_CONT "FAILED!\n");
 			/* Add the warning after printing 'FAILED' */
@@ -997,7 +991,7 @@ static void trace_init_cmdlines(void)
 
 int is_tracing_stopped(void)
 {
-	return trace_stop_count;
+	return global_trace.stop_count;
 }
 
 /**
@@ -1029,12 +1023,12 @@ void tracing_start(void)
 	if (tracing_disabled)
 		return;
 
-	raw_spin_lock_irqsave(&tracing_start_lock, flags);
-	if (--trace_stop_count) {
-		if (trace_stop_count < 0) {
+	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+	if (--global_trace.stop_count) {
+		if (global_trace.stop_count < 0) {
 			/* Someone screwed up their debugging */
 			WARN_ON_ONCE(1);
-			trace_stop_count = 0;
+			global_trace.stop_count = 0;
 		}
 		goto out;
 	}
@@ -1054,7 +1048,38 @@ void tracing_start(void)
 
 	ftrace_start();
  out:
-	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+}
+
+static void tracing_start_tr(struct trace_array *tr)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	if (tracing_disabled)
+		return;
+
+	/* If global, we need to also start the max tracer */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+		return tracing_start();
+
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+
+	if (--tr->stop_count) {
+		if (tr->stop_count < 0) {
+			/* Someone screwed up their debugging */
+			WARN_ON_ONCE(1);
+			tr->stop_count = 0;
+		}
+		goto out;
+	}
+
+	buffer = tr->buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+
+ out:
+	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 /**
@@ -1069,8 +1094,8 @@ void tracing_stop(void)
 	unsigned long flags;
 
 	ftrace_stop();
-	raw_spin_lock_irqsave(&tracing_start_lock, flags);
-	if (trace_stop_count++)
+	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+	if (global_trace.stop_count++)
 		goto out;
 
 	/* Prevent the buffers from switching */
@@ -1087,7 +1112,28 @@ void tracing_stop(void)
 	arch_spin_unlock(&ftrace_max_lock);
 
  out:
-	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	/* If global, we need to also stop the max tracer */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+		return tracing_stop();
+
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+	if (tr->stop_count++)
+		goto out;
+
+	buffer = tr->buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+ out:
+	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 void trace_stop_cmdline_recording(void);
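Note: tracing_start_tr()/tracing_stop_tr() above keep the nesting semantics
of the global tracing_start()/tracing_stop(): stops nest via tr->stop_count,
and recording only resumes once the count drops back to zero. An illustrative
sketch (the caller name is hypothetical):

	static void example_nested_stop(struct trace_array *tr)
	{
		tracing_stop_tr(tr);	/* stop_count 0 -> 1, recording disabled */
		tracing_stop_tr(tr);	/* stop_count 1 -> 2, still disabled */
		tracing_start_tr(tr);	/* stop_count 2 -> 1, still disabled */
		tracing_start_tr(tr);	/* stop_count 1 -> 0, recording re-enabled */
	}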
@@ -1956,6 +2002,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
 	struct trace_iterator *iter = m->private;
+	struct trace_array *tr = iter->tr;
 	int cpu_file = iter->cpu_file;
 	void *p = NULL;
 	loff_t l = 0;
@@ -1968,8 +2015,8 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	 * will point to the same string as current_trace->name.
 	 */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(current_trace && iter->trace->name != current_trace->name))
-		*iter->trace = *current_trace;
+	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+		*iter->trace = *tr->current_trace;
 	mutex_unlock(&trace_types_lock);
 
 	if (iter->snapshot && iter->trace->use_max_tr)
@@ -2099,7 +2146,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 	struct trace_array *tr = iter->tr;
 	struct trace_array_cpu *data = tr->data[tr->cpu];
-	struct tracer *type = current_trace;
+	struct tracer *type = iter->trace;
 	unsigned long entries;
 	unsigned long total;
 	const char *name = "preemption";
@@ -2478,7 +2525,8 @@ static const struct seq_operations tracer_seq_ops = {
 static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
-	long cpu_file = (long) inode->i_private;
+	struct trace_cpu *tc = inode->i_private;
+	struct trace_array *tr = tc->tr;
 	struct trace_iterator *iter;
 	int cpu;
 
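Note: from here on, the (long)inode->i_private casts are replaced by a
struct trace_cpu cookie that carries both the owning array and the CPU index.
A sketch inferred from the tc->tr/tc->cpu usage in the hunks -- the actual
definition lives in trace.h and may carry more members:

	struct trace_cpu {
		struct trace_array	*tr;	/* owning trace array */
		int			cpu;	/* CPU index, or RING_BUFFER_ALL_CPUS */
	};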
@@ -2503,19 +2551,20 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	if (!iter->trace)
 		goto fail;
 
-	*iter->trace = *current_trace;
+	*iter->trace = *tr->current_trace;
 
 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	if (current_trace->print_max || snapshot)
+	/* Currently only the top directory has a snapshot */
+	if (tr->current_trace->print_max || snapshot)
 		iter->tr = &max_tr;
 	else
-		iter->tr = &global_trace;
+		iter->tr = tr;
 	iter->snapshot = snapshot;
 	iter->pos = -1;
 	mutex_init(&iter->mutex);
-	iter->cpu_file = cpu_file;
+	iter->cpu_file = tc->cpu;
 
 	/* Notify the tracer early; before we stop tracing. */
 	if (iter->trace && iter->trace->open)
@@ -2531,7 +2580,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 
 	/* stop the trace while dumping if we are not opening "snapshot" */
 	if (!iter->snapshot)
-		tracing_stop();
+		tracing_stop_tr(tr);
 
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
@@ -2578,6 +2627,7 @@ static int tracing_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = file->private_data;
 	struct trace_iterator *iter;
+	struct trace_array *tr;
 	int cpu;
 
 	if (!(file->f_mode & FMODE_READ))
@@ -2585,6 +2635,12 @@ static int tracing_release(struct inode *inode, struct file *file)
 
 	iter = m->private;
 
+	/* Only the global tracer has a matching max_tr */
+	if (iter->tr == &max_tr)
+		tr = &global_trace;
+	else
+		tr = iter->tr;
+
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
 		if (iter->buffer_iter[cpu])
@@ -2596,7 +2652,7 @@ static int tracing_release(struct inode *inode, struct file *file)
 
 	if (!iter->snapshot)
 		/* reenable tracing if it was previously enabled */
-		tracing_start();
+		tracing_start_tr(tr);
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
@@ -2615,12 +2671,13 @@ static int tracing_open(struct inode *inode, struct file *file)
 	/* If this file was open for write, then erase contents */
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC)) {
-		long cpu = (long) inode->i_private;
+		struct trace_cpu *tc = inode->i_private;
+		struct trace_array *tr = tc->tr;
 
-		if (cpu == RING_BUFFER_ALL_CPUS)
-			tracing_reset_online_cpus(&global_trace);
+		if (tc->cpu == RING_BUFFER_ALL_CPUS)
+			tracing_reset_online_cpus(tr);
 		else
-			tracing_reset(&global_trace, cpu);
+			tracing_reset(tr, tc->cpu);
 	}
 
 	if (file->f_mode & FMODE_READ) {
@@ -2767,8 +2824,9 @@ static ssize_t
 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
-	int err, cpu;
+	struct trace_array *tr = filp->private_data;
 	cpumask_var_t tracing_cpumask_new;
+	int err, cpu;
 
 	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
 		return -ENOMEM;
@@ -2788,13 +2846,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 */
 		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_inc(&global_trace.data[cpu]->disabled);
-			ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
+			atomic_inc(&tr->data[cpu]->disabled);
+			ring_buffer_record_disable_cpu(tr->buffer, cpu);
 		}
 		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_dec(&global_trace.data[cpu]->disabled);
-			ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
+			atomic_dec(&tr->data[cpu]->disabled);
+			ring_buffer_record_enable_cpu(tr->buffer, cpu);
 		}
 	}
 	arch_spin_unlock(&ftrace_max_lock);
@@ -2823,12 +2881,13 @@ static const struct file_operations tracing_cpumask_fops = {
 static int tracing_trace_options_show(struct seq_file *m, void *v)
 {
 	struct tracer_opt *trace_opts;
+	struct trace_array *tr = m->private;
 	u32 tracer_flags;
 	int i;
 
 	mutex_lock(&trace_types_lock);
-	tracer_flags = current_trace->flags->val;
-	trace_opts = current_trace->flags->opts;
+	tracer_flags = tr->current_trace->flags->val;
+	trace_opts = tr->current_trace->flags->opts;
 
 	for (i = 0; trace_options[i]; i++) {
 		if (trace_flags & (1 << i))
@@ -2892,15 +2951,15 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
 	return 0;
 }
 
-int set_tracer_flag(unsigned int mask, int enabled)
+int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 {
 	/* do nothing if flag is already set */
 	if (!!(trace_flags & mask) == !!enabled)
 		return 0;
 
 	/* Give the tracer a chance to approve the change */
-	if (current_trace->flag_changed)
-		if (current_trace->flag_changed(current_trace, mask, !!enabled))
+	if (tr->current_trace->flag_changed)
+		if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
 			return -EINVAL;
 
 	if (enabled)
@@ -2924,7 +2983,7 @@ int set_tracer_flag(unsigned int mask, int enabled)
 	return 0;
 }
 
-static int trace_set_options(char *option)
+static int trace_set_options(struct trace_array *tr, char *option)
 {
 	char *cmp;
 	int neg = 0;
@@ -2942,14 +3001,14 @@ static int trace_set_options(char *option)
 
 	for (i = 0; trace_options[i]; i++) {
 		if (strcmp(cmp, trace_options[i]) == 0) {
-			ret = set_tracer_flag(1 << i, !neg);
+			ret = set_tracer_flag(tr, 1 << i, !neg);
 			break;
 		}
 	}
 
 	/* If no option could be set, test the specific tracer options */
 	if (!trace_options[i])
-		ret = set_tracer_option(current_trace, cmp, neg);
+		ret = set_tracer_option(tr->current_trace, cmp, neg);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -2960,6 +3019,8 @@ static ssize_t
 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
+	struct seq_file *m = filp->private_data;
+	struct trace_array *tr = m->private;
 	char buf[64];
 	int ret;
 
@@ -2971,7 +3032,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
 	buf[cnt] = 0;
 
-	ret = trace_set_options(buf);
+	ret = trace_set_options(tr, buf);
 	if (ret < 0)
 		return ret;
 
@@ -2984,7 +3045,8 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
 {
 	if (tracing_disabled)
 		return -ENODEV;
-	return single_open(file, tracing_trace_options_show, NULL);
+
+	return single_open(file, tracing_trace_options_show, inode->i_private);
 }
 
 static const struct file_operations tracing_iter_fops = {
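Note: handing inode->i_private to single_open() makes the trace_array
pointer reappear as m->private in both the show and write paths, which is
what the seq_file-based handlers above and below rely on. A sketch of the
pattern (the example_* names and the do_options_write() helper are
hypothetical):

	static int example_open(struct inode *inode, struct file *file)
	{
		/* i_private was set by trace_create_file(..., tr, ...) */
		return single_open(file, tracing_trace_options_show,
				   inode->i_private);
	}

	static ssize_t example_write(struct file *filp, const char __user *ubuf,
				     size_t cnt, loff_t *ppos)
	{
		struct seq_file *m = filp->private_data; /* set by single_open() */
		struct trace_array *tr = m->private;	 /* the i_private cookie */

		return do_options_write(tr, ubuf, cnt, ppos);
	}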
@@ -3082,11 +3144,12 @@ static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
+	struct trace_array *tr = filp->private_data;
 	char buf[MAX_TRACER_SIZE+2];
 	int r;
 
 	mutex_lock(&trace_types_lock);
-	r = sprintf(buf, "%s\n", current_trace->name);
+	r = sprintf(buf, "%s\n", tr->current_trace->name);
 	mutex_unlock(&trace_types_lock);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3130,7 +3193,8 @@ static int resize_buffer_duplicate_size(struct trace_array *tr,
 	return ret;
 }
 
-static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
+static int __tracing_resize_ring_buffer(struct trace_array *tr,
+					unsigned long size, int cpu)
 {
 	int ret;
 
@@ -3142,20 +3206,20 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 	ring_buffer_expanded = 1;
 
 	/* May be called before buffers are initialized */
-	if (!global_trace.buffer)
+	if (!tr->buffer)
 		return 0;
 
-	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
+	ret = ring_buffer_resize(tr->buffer, size, cpu);
 	if (ret < 0)
 		return ret;
 
-	if (!current_trace->use_max_tr)
+	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
+	    !tr->current_trace->use_max_tr)
 		goto out;
 
 	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
 	if (ret < 0) {
-		int r = resize_buffer_duplicate_size(&global_trace,
-						     &global_trace, cpu);
+		int r = resize_buffer_duplicate_size(tr, tr, cpu);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -3184,14 +3248,15 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 
  out:
 	if (cpu == RING_BUFFER_ALL_CPUS)
-		set_buffer_entries(&global_trace, size);
+		set_buffer_entries(tr, size);
 	else
-		global_trace.data[cpu]->entries = size;
+		tr->data[cpu]->entries = size;
 
 	return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
+static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+					  unsigned long size, int cpu_id)
 {
 	int ret = size;
 
@@ -3205,7 +3270,7 @@ static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 		}
 	}
 
-	ret = __tracing_resize_ring_buffer(size, cpu_id);
+	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
 	if (ret < 0)
 		ret = -ENOMEM;
 
@@ -3232,7 +3297,7 @@ int tracing_update_buffers(void)
 
 	mutex_lock(&trace_types_lock);
 	if (!ring_buffer_expanded)
-		ret = __tracing_resize_ring_buffer(trace_buf_size,
+		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
 						RING_BUFFER_ALL_CPUS);
 	mutex_unlock(&trace_types_lock);
 
@@ -3242,7 +3307,7 @@ int tracing_update_buffers(void)
 struct trace_option_dentry;
 
 static struct trace_option_dentry *
-create_trace_option_files(struct tracer *tracer);
+create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
 
 static void
 destroy_trace_option_files(struct trace_option_dentry *topts);
@@ -3258,7 +3323,7 @@ static int tracing_set_tracer(const char *buf)
 	mutex_lock(&trace_types_lock);
 
 	if (!ring_buffer_expanded) {
-		ret = __tracing_resize_ring_buffer(trace_buf_size,
+		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
 						RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
@@ -3273,18 +3338,18 @@ static int tracing_set_tracer(const char *buf)
 		ret = -EINVAL;
 		goto out;
 	}
-	if (t == current_trace)
+	if (t == tr->current_trace)
 		goto out;
 
 	trace_branch_disable();
 
-	current_trace->enabled = false;
+	tr->current_trace->enabled = false;
 
-	if (current_trace->reset)
-		current_trace->reset(tr);
+	if (tr->current_trace->reset)
+		tr->current_trace->reset(tr);
 
-	had_max_tr = current_trace->allocated_snapshot;
-	current_trace = &nop_trace;
+	had_max_tr = tr->current_trace->allocated_snapshot;
+	tr->current_trace = &nop_trace;
 
 	if (had_max_tr && !t->use_max_tr) {
 		/*
@@ -3303,11 +3368,11 @@ static int tracing_set_tracer(const char *buf)
 		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
 		set_buffer_entries(&max_tr, 1);
 		tracing_reset_online_cpus(&max_tr);
-		current_trace->allocated_snapshot = false;
+		tr->current_trace->allocated_snapshot = false;
 	}
 	destroy_trace_option_files(topts);
 
-	topts = create_trace_option_files(t);
+	topts = create_trace_option_files(tr, t);
 	if (t->use_max_tr && !had_max_tr) {
 		/* we need to make per cpu buffer sizes equivalent */
 		ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
@@ -3323,8 +3388,8 @@ static int tracing_set_tracer(const char *buf)
 		goto out;
 	}
 
-	current_trace = t;
-	current_trace->enabled = true;
+	tr->current_trace = t;
+	tr->current_trace->enabled = true;
 	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
@@ -3398,7 +3463,8 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
-	long cpu_file = (long) inode->i_private;
+	struct trace_cpu *tc = inode->i_private;
+	struct trace_array *tr = tc->tr;
 	struct trace_iterator *iter;
 	int ret = 0;
 
@@ -3423,7 +3489,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		ret = -ENOMEM;
 		goto fail;
 	}
-	*iter->trace = *current_trace;
+	*iter->trace = *tr->current_trace;
 
 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 		ret = -ENOMEM;
@@ -3440,8 +3506,8 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (trace_clocks[trace_clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	iter->cpu_file = cpu_file;
-	iter->tr = &global_trace;
+	iter->cpu_file = tc->cpu;
+	iter->tr = tc->tr;
 	mutex_init(&iter->mutex);
 	filp->private_data = iter;
 
@@ -3563,6 +3629,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
 	struct trace_iterator *iter = filp->private_data;
+	struct trace_array *tr = iter->tr;
 	ssize_t sret;
 
 	/* return any leftover data */
@@ -3574,8 +3641,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(iter->trace->name != current_trace->name))
-		*iter->trace = *current_trace;
+	if (unlikely(iter->trace->name != tr->current_trace->name))
+		*iter->trace = *tr->current_trace;
 	mutex_unlock(&trace_types_lock);
 
 	/*
@@ -3731,6 +3798,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		.ops		= &tracing_pipe_buf_ops,
 		.spd_release	= tracing_spd_release_pipe,
 	};
+	struct trace_array *tr = iter->tr;
 	ssize_t ret;
 	size_t rem;
 	unsigned int i;
@@ -3740,8 +3808,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(iter->trace->name != current_trace->name))
-		*iter->trace = *current_trace;
+	if (unlikely(iter->trace->name != tr->current_trace->name))
+		*iter->trace = *tr->current_trace;
 	mutex_unlock(&trace_types_lock);
 
 	mutex_lock(&iter->mutex);
@@ -3803,43 +3871,19 @@ out_err:
 	goto out;
 }
 
-struct ftrace_entries_info {
-	struct trace_array	*tr;
-	int			cpu;
-};
-
-static int tracing_entries_open(struct inode *inode, struct file *filp)
-{
-	struct ftrace_entries_info *info;
-
-	if (tracing_disabled)
-		return -ENODEV;
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	info->tr = &global_trace;
-	info->cpu = (unsigned long)inode->i_private;
-
-	filp->private_data = info;
-
-	return 0;
-}
-
 static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
-	struct ftrace_entries_info *info = filp->private_data;
-	struct trace_array *tr = info->tr;
+	struct trace_cpu *tc = filp->private_data;
+	struct trace_array *tr = tc->tr;
 	char buf[64];
 	int r = 0;
 	ssize_t ret;
 
 	mutex_lock(&trace_types_lock);
 
-	if (info->cpu == RING_BUFFER_ALL_CPUS) {
+	if (tc->cpu == RING_BUFFER_ALL_CPUS) {
 		int cpu, buf_size_same;
 		unsigned long size;
 
@@ -3866,7 +3910,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		} else
 			r = sprintf(buf, "X\n");
 	} else
-		r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
+		r = sprintf(buf, "%lu\n", tr->data[tc->cpu]->entries >> 10);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -3878,7 +3922,7 @@ static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
-	struct ftrace_entries_info *info = filp->private_data;
+	struct trace_cpu *tc = filp->private_data;
 	unsigned long val;
 	int ret;
 
@@ -3893,7 +3937,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	/* value is in KB */
 	val <<= 10;
 
-	ret = tracing_resize_ring_buffer(val, info->cpu);
+	ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
 	if (ret < 0)
 		return ret;
 
@@ -3902,16 +3946,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
-static int
-tracing_entries_release(struct inode *inode, struct file *filp)
-{
-	struct ftrace_entries_info *info = filp->private_data;
-
-	kfree(info);
-
-	return 0;
-}
-
 static ssize_t
 tracing_total_entries_read(struct file *filp, char __user *ubuf,
 				size_t cnt, loff_t *ppos)
@@ -3953,11 +3987,13 @@ tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
 static int
 tracing_free_buffer_release(struct inode *inode, struct file *filp)
 {
+	struct trace_array *tr = inode->i_private;
+
 	/* disable tracing ? */
 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
 		tracing_off();
 	/* resize the ring buffer to 0 */
-	tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
+	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 
 	return 0;
 }
@@ -4068,13 +4104,14 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_clock_show(struct seq_file *m, void *v)
 {
+	struct trace_array *tr = m->private;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
 		seq_printf(m,
 			"%s%s%s%s", i ? " " : "",
-			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
-			i == trace_clock_id ? "]" : "");
+			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
+			i == tr->clock_id ? "]" : "");
 	seq_putc(m, '\n');
 
 	return 0;
@@ -4083,6 +4120,8 @@ static int tracing_clock_show(struct seq_file *m, void *v)
 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 				   size_t cnt, loff_t *fpos)
 {
+	struct seq_file *m = filp->private_data;
+	struct trace_array *tr = m->private;
 	char buf[64];
 	const char *clockstr;
 	int i;
@@ -4104,12 +4143,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	if (i == ARRAY_SIZE(trace_clocks))
 		return -EINVAL;
 
-	trace_clock_id = i;
-
 	mutex_lock(&trace_types_lock);
 
-	ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
-	if (max_tr.buffer)
+	tr->clock_id = i;
+
+	ring_buffer_set_clock(tr->buffer, trace_clocks[i].func);
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && max_tr.buffer)
 		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
 
 	/*
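Note: the clock selection becomes per-array state (tr->clock_id), and only
the global array -- gated by TRACE_ARRAY_FL_GLOBAL -- also retargets the
max_tr snapshot buffer, since no other array owns one. Observe that the
context in the tracing_open_pipe() hunk earlier still reads the global
trace_clock_id; a fully per-array form of that check would presumably look
like the following (illustration only, not part of this patch):

	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;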
@@ -4130,20 +4169,37 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
 {
 	if (tracing_disabled)
 		return -ENODEV;
-	return single_open(file, tracing_clock_show, NULL);
+
+	return single_open(file, tracing_clock_show, inode->i_private);
 }
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 static int tracing_snapshot_open(struct inode *inode, struct file *file)
 {
+	struct trace_cpu *tc = inode->i_private;
 	struct trace_iterator *iter;
+	struct seq_file *m;
 	int ret = 0;
 
 	if (file->f_mode & FMODE_READ) {
 		iter = __tracing_open(inode, file, true);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
+	} else {
+		/* Writes still need the seq_file to hold the private data */
+		m = kzalloc(sizeof(*m), GFP_KERNEL);
+		if (!m)
+			return -ENOMEM;
+		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+		if (!iter) {
+			kfree(m);
+			return -ENOMEM;
+		}
+		iter->tr = tc->tr;
+		m->private = iter;
+		file->private_data = m;
 	}
+
 	return ret;
 }
 
@@ -4151,6 +4207,9 @@ static ssize_t
 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		       loff_t *ppos)
 {
+	struct seq_file *m = filp->private_data;
+	struct trace_iterator *iter = m->private;
+	struct trace_array *tr = iter->tr;
 	unsigned long val;
 	int ret;
 
@@ -4164,30 +4223,30 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	mutex_lock(&trace_types_lock);
 
-	if (current_trace->use_max_tr) {
+	if (tr->current_trace->use_max_tr) {
 		ret = -EBUSY;
 		goto out;
 	}
 
 	switch (val) {
 	case 0:
-		if (current_trace->allocated_snapshot) {
+		if (tr->current_trace->allocated_snapshot) {
 			/* free spare buffer */
 			ring_buffer_resize(max_tr.buffer, 1,
 					   RING_BUFFER_ALL_CPUS);
 			set_buffer_entries(&max_tr, 1);
 			tracing_reset_online_cpus(&max_tr);
-			current_trace->allocated_snapshot = false;
+			tr->current_trace->allocated_snapshot = false;
 		}
 		break;
 	case 1:
-		if (!current_trace->allocated_snapshot) {
+		if (!tr->current_trace->allocated_snapshot) {
 			/* allocate spare buffer */
 			ret = resize_buffer_duplicate_size(&max_tr,
 					&global_trace, RING_BUFFER_ALL_CPUS);
 			if (ret < 0)
 				break;
-			current_trace->allocated_snapshot = true;
+			tr->current_trace->allocated_snapshot = true;
 		}
 
 		local_irq_disable();
@@ -4196,7 +4255,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		local_irq_enable();
 		break;
 	default:
-		if (current_trace->allocated_snapshot)
+		if (tr->current_trace->allocated_snapshot)
 			tracing_reset_online_cpus(&max_tr);
 		break;
 	}
@@ -4209,6 +4268,22 @@ out:
 	mutex_unlock(&trace_types_lock);
 	return ret;
 }
+
+static int tracing_snapshot_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+
+	if (file->f_mode & FMODE_READ)
+		return tracing_release(inode, file);
+
+	/* If write only, the seq_file is just a stub */
+	if (m)
+		kfree(m->private);
+	kfree(m);
+
+	return 0;
+}
+
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 
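Note: tracing_snapshot_open() now builds a bare seq_file for write-only
opens so that tracing_snapshot_write() can always reach its trace_array via
m->private, and tracing_snapshot_release() is the matching teardown. The
object lifetime, paraphrased from the code above:

	/*
	 * open(O_WRONLY): kzalloc(m), kzalloc(iter), iter->tr = tc->tr,
	 *                 m->private = iter, file->private_data = m
	 * write():        m = filp->private_data,
	 *                 tr = ((struct trace_iterator *)m->private)->tr
	 * release():      kfree(m->private), kfree(m)
	 *
	 * Read opens go through __tracing_open()/tracing_release() instead.
	 */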
@@ -4236,10 +4311,9 @@ static const struct file_operations tracing_pipe_fops = {
|
|
};
|
|
};
|
|
|
|
|
|
static const struct file_operations tracing_entries_fops = {
|
|
static const struct file_operations tracing_entries_fops = {
|
|
- .open = tracing_entries_open,
|
|
|
|
|
|
+ .open = tracing_open_generic,
|
|
.read = tracing_entries_read,
|
|
.read = tracing_entries_read,
|
|
.write = tracing_entries_write,
|
|
.write = tracing_entries_write,
|
|
- .release = tracing_entries_release,
|
|
|
|
.llseek = generic_file_llseek,
|
|
.llseek = generic_file_llseek,
|
|
};
|
|
};
|
|
|
|
|
|
@@ -4274,7 +4348,7 @@ static const struct file_operations snapshot_fops = {
|
|
.read = seq_read,
|
|
.read = seq_read,
|
|
.write = tracing_snapshot_write,
|
|
.write = tracing_snapshot_write,
|
|
.llseek = tracing_seek,
|
|
.llseek = tracing_seek,
|
|
- .release = tracing_release,
|
|
|
|
|
|
+ .release = tracing_snapshot_release,
|
|
};
|
|
};
|
|
#endif /* CONFIG_TRACER_SNAPSHOT */
|
|
#endif /* CONFIG_TRACER_SNAPSHOT */
|
|
|
|
|
|
@@ -4287,7 +4361,8 @@ struct ftrace_buffer_info {
|
|
|
|
|
|
static int tracing_buffers_open(struct inode *inode, struct file *filp)
|
|
static int tracing_buffers_open(struct inode *inode, struct file *filp)
|
|
{
|
|
{
|
|
- int cpu = (int)(long)inode->i_private;
|
|
|
|
|
|
+ struct trace_cpu *tc = inode->i_private;
|
|
|
|
+ struct trace_array *tr = tc->tr;
|
|
struct ftrace_buffer_info *info;
|
|
struct ftrace_buffer_info *info;
|
|
|
|
|
|
if (tracing_disabled)
|
|
if (tracing_disabled)
|
|
@@ -4297,8 +4372,8 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
|
|
if (!info)
|
|
if (!info)
|
|
return -ENOMEM;
|
|
return -ENOMEM;
|
|
|
|
|
|
- info->tr = &global_trace;
|
|
|
|
- info->cpu = cpu;
|
|
|
|
|
|
+ info->tr = tr;
|
|
|
|
+ info->cpu = tc->cpu;
|
|
info->spare = NULL;
|
|
info->spare = NULL;
|
|
/* Force reading ring buffer for first read */
|
|
/* Force reading ring buffer for first read */
|
|
info->read = (unsigned int)-1;
|
|
info->read = (unsigned int)-1;
|
|
@@ -4535,12 +4610,13 @@ static ssize_t
|
|
tracing_stats_read(struct file *filp, char __user *ubuf,
|
|
tracing_stats_read(struct file *filp, char __user *ubuf,
|
|
size_t count, loff_t *ppos)
|
|
size_t count, loff_t *ppos)
|
|
{
|
|
{
|
|
- unsigned long cpu = (unsigned long)filp->private_data;
|
|
|
|
- struct trace_array *tr = &global_trace;
|
|
|
|
|
|
+ struct trace_cpu *tc = filp->private_data;
|
|
|
|
+ struct trace_array *tr = tc->tr;
|
|
struct trace_seq *s;
|
|
struct trace_seq *s;
|
|
unsigned long cnt;
|
|
unsigned long cnt;
|
|
unsigned long long t;
|
|
unsigned long long t;
|
|
unsigned long usec_rem;
|
|
unsigned long usec_rem;
|
|
|
|
+ int cpu = tc->cpu;
|
|
|
|
|
|
s = kmalloc(sizeof(*s), GFP_KERNEL);
|
|
s = kmalloc(sizeof(*s), GFP_KERNEL);
|
|
if (!s)
|
|
if (!s)
|
|
@@ -4636,58 +4712,57 @@ static const struct file_operations tracing_dyn_info_fops = {
|
|
};
|
|
};
|
|
#endif
|
|
#endif
|
|
|
|
|
|
-static struct dentry *d_tracer;
|
|
|
|
-
|
|
|
|
-struct dentry *tracing_init_dentry(void)
|
|
|
|
|
|
+struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
|
|
{
|
|
{
|
|
static int once;
|
|
static int once;
|
|
|
|
|
|
- if (d_tracer)
|
|
|
|
- return d_tracer;
|
|
|
|
|
|
+ if (tr->dir)
|
|
|
|
+ return tr->dir;
|
|
|
|
|
|
if (!debugfs_initialized())
|
|
if (!debugfs_initialized())
|
|
return NULL;
|
|
return NULL;
|
|
|
|
|
|
- d_tracer = debugfs_create_dir("tracing", NULL);
|
|
|
|
|
|
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
|
|
|
|
+ tr->dir = debugfs_create_dir("tracing", NULL);
|
|
|
|
|
|
- if (!d_tracer && !once) {
|
|
|
|
|
|
+ if (!tr->dir && !once) {
|
|
once = 1;
|
|
once = 1;
|
|
pr_warning("Could not create debugfs directory 'tracing'\n");
|
|
pr_warning("Could not create debugfs directory 'tracing'\n");
|
|
return NULL;
|
|
return NULL;
|
|
}
|
|
}
|
|
|
|
|
|
- return d_tracer;
|
|
|
|
|
|
+ return tr->dir;
|
|
}
|
|
}
|
|
|
|
|
|
-static struct dentry *d_percpu;
|
|
|
|
|
|
+struct dentry *tracing_init_dentry(void)
|
|
|
|
+{
|
|
|
|
+ return tracing_init_dentry_tr(&global_trace);
|
|
|
|
+}
|
|
|
|
|
|
-static struct dentry *tracing_dentry_percpu(void)
|
|
|
|
|
|
+static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
|
|
{
|
|
{
|
|
- static int once;
|
|
|
|
struct dentry *d_tracer;
|
|
struct dentry *d_tracer;
|
|
|
|
|
|
- if (d_percpu)
|
|
|
|
- return d_percpu;
|
|
|
|
-
|
|
|
|
- d_tracer = tracing_init_dentry();
|
|
|
|
|
|
+ if (tr->percpu_dir)
|
|
|
|
+ return tr->percpu_dir;
|
|
|
|
|
|
|
|
+ d_tracer = tracing_init_dentry_tr(tr);
|
|
if (!d_tracer)
|
|
if (!d_tracer)
|
|
return NULL;
|
|
return NULL;
|
|
|
|
|
|
- d_percpu = debugfs_create_dir("per_cpu", d_tracer);
|
|
|
|
|
|
+ tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
|
|
|
|
|
|
- if (!d_percpu && !once) {
|
|
|
|
- once = 1;
|
|
|
|
- pr_warning("Could not create debugfs directory 'per_cpu'\n");
|
|
|
|
- return NULL;
|
|
|
|
- }
|
|
|
|
|
|
+ WARN_ONCE(!tr->percpu_dir,
|
|
|
|
+ "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
|
|
|
|
|
|
- return d_percpu;
|
|
|
|
|
|
+ return tr->percpu_dir;
|
|
}
|
|
}
|
|
|
|
|
|
-static void tracing_init_debugfs_percpu(long cpu)
|
|
|
|
|
|
+static void
|
|
|
|
+tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
|
|
{
|
|
{
|
|
- struct dentry *d_percpu = tracing_dentry_percpu();
|
|
|
|
|
|
+ struct trace_array_cpu *data = tr->data[cpu];
|
|
|
|
+ struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
|
|
struct dentry *d_cpu;
|
|
struct dentry *d_cpu;
|
|
char cpu_dir[30]; /* 30 characters should be more than enough */
|
|
char cpu_dir[30]; /* 30 characters should be more than enough */
|
|
|
|
|
|
@@ -4703,20 +4778,20 @@ static void tracing_init_debugfs_percpu(long cpu)
|
|
|
|
|
|
/* per cpu trace_pipe */
|
|
/* per cpu trace_pipe */
|
|
trace_create_file("trace_pipe", 0444, d_cpu,
|
|
trace_create_file("trace_pipe", 0444, d_cpu,
|
|
- (void *) cpu, &tracing_pipe_fops);
|
|
|
|
|
|
+ (void *)&data->trace_cpu, &tracing_pipe_fops);
|
|
|
|
|
|
/* per cpu trace */
|
|
/* per cpu trace */
|
|
trace_create_file("trace", 0644, d_cpu,
|
|
trace_create_file("trace", 0644, d_cpu,
|
|
- (void *) cpu, &tracing_fops);
|
|
|
|
|
|
+ (void *)&data->trace_cpu, &tracing_fops);
|
|
|
|
|
|
trace_create_file("trace_pipe_raw", 0444, d_cpu,
|
|
trace_create_file("trace_pipe_raw", 0444, d_cpu,
|
|
- (void *) cpu, &tracing_buffers_fops);
|
|
|
|
|
|
+ (void *)&data->trace_cpu, &tracing_buffers_fops);
|
|
|
|
|
|
trace_create_file("stats", 0444, d_cpu,
|
|
trace_create_file("stats", 0444, d_cpu,
|
|
- (void *) cpu, &tracing_stats_fops);
|
|
|
|
|
|
+ (void *)&data->trace_cpu, &tracing_stats_fops);
|
|
|
|
|
|
trace_create_file("buffer_size_kb", 0444, d_cpu,
|
|
trace_create_file("buffer_size_kb", 0444, d_cpu,
|
|
- (void *) cpu, &tracing_entries_fops);
|
|
|
|
|
|
+ (void *)&data->trace_cpu, &tracing_entries_fops);
|
|
}
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_FTRACE_SELFTEST
|
|
#ifdef CONFIG_FTRACE_SELFTEST
|
|
@@ -4727,6 +4802,7 @@ static void tracing_init_debugfs_percpu(long cpu)
|
|
struct trace_option_dentry {
|
|
struct trace_option_dentry {
|
|
struct tracer_opt *opt;
|
|
struct tracer_opt *opt;
|
|
struct tracer_flags *flags;
|
|
struct tracer_flags *flags;
|
|
|
|
+ struct trace_array *tr;
|
|
struct dentry *entry;
|
|
struct dentry *entry;
|
|
};
|
|
};
|
|
|
|
|
|
@@ -4762,7 +4838,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
|
|
|
|
if (!!(topt->flags->val & topt->opt->bit) != val) {
|
|
if (!!(topt->flags->val & topt->opt->bit) != val) {
|
|
mutex_lock(&trace_types_lock);
|
|
mutex_lock(&trace_types_lock);
|
|
- ret = __set_tracer_option(current_trace, topt->flags,
|
|
|
|
|
|
+ ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
|
|
topt->opt, !val);
|
|
topt->opt, !val);
|
|
mutex_unlock(&trace_types_lock);
|
|
mutex_unlock(&trace_types_lock);
|
|
if (ret)
|
|
if (ret)
|
|
@@ -4801,6 +4877,7 @@ static ssize_t
|
|
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
loff_t *ppos)
|
|
loff_t *ppos)
|
|
{
|
|
{
|
|
|
|
+ struct trace_array *tr = &global_trace;
|
|
long index = (long)filp->private_data;
|
|
long index = (long)filp->private_data;
|
|
unsigned long val;
|
|
unsigned long val;
|
|
int ret;
|
|
int ret;
|
|
@@ -4813,7 +4890,7 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
return -EINVAL;
|
|
return -EINVAL;
|
|
|
|
|
|
mutex_lock(&trace_types_lock);
|
|
mutex_lock(&trace_types_lock);
|
|
- ret = set_tracer_flag(1 << index, val);
|
|
|
|
|
|
+ ret = set_tracer_flag(tr, 1 << index, val);
|
|
mutex_unlock(&trace_types_lock);
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
|
if (ret < 0)
|
|
if (ret < 0)
|
|
@@ -4847,40 +4924,41 @@ struct dentry *trace_create_file(const char *name,
|
|
}
|
|
}
|
|
|
|
|
|
|
|
|
|
-static struct dentry *trace_options_init_dentry(void)
|
|
|
|
|
|
+static struct dentry *trace_options_init_dentry(struct trace_array *tr)
|
|
{
|
|
{
|
|
struct dentry *d_tracer;
|
|
struct dentry *d_tracer;
|
|
- static struct dentry *t_options;
|
|
|
|
|
|
|
|
- if (t_options)
|
|
|
|
- return t_options;
|
|
|
|
|
|
+ if (tr->options)
|
|
|
|
+ return tr->options;
|
|
|
|
|
|
- d_tracer = tracing_init_dentry();
|
|
|
|
|
|
+ d_tracer = tracing_init_dentry_tr(tr);
|
|
if (!d_tracer)
|
|
if (!d_tracer)
|
|
return NULL;
|
|
return NULL;
|
|
|
|
|
|
- t_options = debugfs_create_dir("options", d_tracer);
|
|
|
|
- if (!t_options) {
|
|
|
|
|
|
+ tr->options = debugfs_create_dir("options", d_tracer);
|
|
|
|
+ if (!tr->options) {
|
|
pr_warning("Could not create debugfs directory 'options'\n");
|
|
pr_warning("Could not create debugfs directory 'options'\n");
|
|
return NULL;
|
|
return NULL;
|
|
}
|
|
}
|
|
|
|
|
|
- return t_options;
|
|
|
|
|
|
+ return tr->options;
|
|
}
|
|
}
|
|
|
|
|
|
static void
|
|
static void
|
|
-create_trace_option_file(struct trace_option_dentry *topt,
|
|
|
|
|
|
+create_trace_option_file(struct trace_array *tr,
|
|
|
|
+ struct trace_option_dentry *topt,
|
|
struct tracer_flags *flags,
|
|
struct tracer_flags *flags,
|
|
struct tracer_opt *opt)
|
|
struct tracer_opt *opt)
|
|
{
|
|
{
|
|
struct dentry *t_options;
|
|
struct dentry *t_options;
|
|
|
|
|
|
- t_options = trace_options_init_dentry();
|
|
|
|
|
|
+ t_options = trace_options_init_dentry(tr);
|
|
if (!t_options)
|
|
if (!t_options)
|
|
return;
|
|
return;
|
|
|
|
|
|
topt->flags = flags;
|
|
topt->flags = flags;
|
|
topt->opt = opt;
|
|
topt->opt = opt;
|
|
|
|
+ topt->tr = tr;
|
|
|
|
|
|
topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
|
|
topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
|
|
&trace_options_fops);
|
|
&trace_options_fops);
|
|
@@ -4888,7 +4966,7 @@ create_trace_option_file(struct trace_option_dentry *topt,
|
|
}
|
|
}
|
|
|
|
|
|
static struct trace_option_dentry *
|
|
static struct trace_option_dentry *
|
|
-create_trace_option_files(struct tracer *tracer)
|
|
|
|
|
|
+create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
|
|
{
|
|
{
|
|
struct trace_option_dentry *topts;
|
|
struct trace_option_dentry *topts;
|
|
struct tracer_flags *flags;
|
|
struct tracer_flags *flags;
|
|
@@ -4913,7 +4991,7 @@ create_trace_option_files(struct tracer *tracer)
|
|
return NULL;
|
|
return NULL;
|
|
|
|
|
|
for (cnt = 0; opts[cnt].name; cnt++)
|
|
for (cnt = 0; opts[cnt].name; cnt++)
|
|
- create_trace_option_file(&topts[cnt], flags,
|
|
|
|
|
|
+ create_trace_option_file(tr, &topts[cnt], flags,
|
|
&opts[cnt]);
|
|
&opts[cnt]);
|
|
|
|
|
|
return topts;
|
|
return topts;
|
|
@@ -4936,11 +5014,12 @@ destroy_trace_option_files(struct trace_option_dentry *topts)
|
|
}
|
|
}
|
|
|
|
|
|
static struct dentry *
|
|
static struct dentry *
|
|
-create_trace_option_core_file(const char *option, long index)
|
|
|
|
|
|
+create_trace_option_core_file(struct trace_array *tr,
|
|
|
|
+ const char *option, long index)
|
|
{
|
|
{
|
|
struct dentry *t_options;
|
|
struct dentry *t_options;
|
|
|
|
|
|
- t_options = trace_options_init_dentry();
|
|
|
|
|
|
+ t_options = trace_options_init_dentry(tr);
|
|
if (!t_options)
|
|
if (!t_options)
|
|
return NULL;
|
|
return NULL;
|
|
|
|
|
|
@@ -4948,17 +5027,17 @@ create_trace_option_core_file(const char *option, long index)
|
|
&trace_options_core_fops);
|
|
&trace_options_core_fops);
|
|
}
|
|
}
|
|
|
|
|
|
-static __init void create_trace_options_dir(void)
|
|
|
|
|
|
+static __init void create_trace_options_dir(struct trace_array *tr)
|
|
{
|
|
{
|
|
struct dentry *t_options;
|
|
struct dentry *t_options;
|
|
int i;
|
|
int i;
|
|
|
|
|
|
- t_options = trace_options_init_dentry();
|
|
|
|
|
|
+ t_options = trace_options_init_dentry(tr);
|
|
if (!t_options)
|
|
if (!t_options)
|
|
return;
|
|
return;
|
|
|
|
|
|
for (i = 0; trace_options[i]; i++)
|
|
for (i = 0; trace_options[i]; i++)
|
|
- create_trace_option_core_file(trace_options[i], i);
|
|
|
|
|
|
+ create_trace_option_core_file(tr, trace_options[i], i);
|
|
}
|
|
}
|
|
|
|
|
|
static ssize_t
|
|
static ssize_t
|
|
@@ -4997,12 +5076,12 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
|
|
mutex_lock(&trace_types_lock);
|
|
mutex_lock(&trace_types_lock);
|
|
if (val) {
|
|
if (val) {
|
|
ring_buffer_record_on(buffer);
|
|
ring_buffer_record_on(buffer);
|
|
- if (current_trace->start)
|
|
|
|
- current_trace->start(tr);
|
|
|
|
|
|
+ if (tr->current_trace->start)
|
|
|
|
+ tr->current_trace->start(tr);
|
|
} else {
|
|
} else {
|
|
ring_buffer_record_off(buffer);
|
|
ring_buffer_record_off(buffer);
|
|
- if (current_trace->stop)
|
|
|
|
- current_trace->stop(tr);
|
|
|
|
|
|
+ if (tr->current_trace->stop)
|
|
|
|
+ tr->current_trace->stop(tr);
|
|
}
|
|
}
|
|
mutex_unlock(&trace_types_lock);
|
|
mutex_unlock(&trace_types_lock);
|
|
}
|
|
}
|
|
@@ -5019,6 +5098,38 @@ static const struct file_operations rb_simple_fops = {
 	.llseek		= default_llseek,
 };
 
+static void
+init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
+{
+
+	trace_create_file("trace_options", 0644, d_tracer,
+			  tr, &tracing_iter_fops);
+
+	trace_create_file("trace", 0644, d_tracer,
+			(void *)&tr->trace_cpu, &tracing_fops);
+
+	trace_create_file("trace_pipe", 0444, d_tracer,
+			(void *)&tr->trace_cpu, &tracing_pipe_fops);
+
+	trace_create_file("buffer_size_kb", 0644, d_tracer,
+			(void *)&tr->trace_cpu, &tracing_entries_fops);
+
+	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
+			  tr, &tracing_total_entries_fops);
+
+	trace_create_file("free_buffer", 0644, d_tracer,
+			  tr, &tracing_free_buffer_fops);
+
+	trace_create_file("trace_marker", 0220, d_tracer,
+			  tr, &tracing_mark_fops);
+
+	trace_create_file("trace_clock", 0644, d_tracer, tr,
+			  &trace_clock_fops);
+
+	trace_create_file("tracing_on", 0644, d_tracer,
+			    tr, &rb_simple_fops);
+}
+
 static __init int tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
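[Editor's sketch, not part of the patch: factoring the per-instance files into init_tracer_debugfs() is what makes additional trace_array instances cheap to wire up. A minimal illustration, assuming the caller has already allocated and initialized tr; the directory name "instance" and the function name are hypothetical.]

static int __init instance_debugfs_sketch(struct trace_array *tr)
{
	struct dentry *dir;

	/* hypothetical private debugfs directory for the new instance */
	dir = debugfs_create_dir("instance", NULL);
	if (!dir)
		return -ENOMEM;

	/* populate it with the same file set as the top-level directory */
	init_tracer_debugfs(tr, dir);
	return 0;
}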
@@ -5028,14 +5139,10 @@ static __init int tracer_init_debugfs(void)
 
 	d_tracer = tracing_init_dentry();
 
-	trace_create_file("trace_options", 0644, d_tracer,
-			NULL, &tracing_iter_fops);
+	init_tracer_debugfs(&global_trace, d_tracer);
 
 	trace_create_file("tracing_cpumask", 0644, d_tracer,
-			NULL, &tracing_cpumask_fops);
-
-	trace_create_file("trace", 0644, d_tracer,
-			(void *) RING_BUFFER_ALL_CPUS, &tracing_fops);
+			&global_trace, &tracing_cpumask_fops);
 
 	trace_create_file("available_tracers", 0444, d_tracer,
 			&global_trace, &show_traces_fops);
@@ -5054,30 +5161,9 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
 
-	trace_create_file("trace_pipe", 0444, d_tracer,
-			(void *) RING_BUFFER_ALL_CPUS, &tracing_pipe_fops);
-
-	trace_create_file("buffer_size_kb", 0644, d_tracer,
-			(void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
-
-	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
-			&global_trace, &tracing_total_entries_fops);
-
-	trace_create_file("free_buffer", 0644, d_tracer,
-			&global_trace, &tracing_free_buffer_fops);
-
-	trace_create_file("trace_marker", 0220, d_tracer,
-			NULL, &tracing_mark_fops);
-
 	trace_create_file("saved_cmdlines", 0444, d_tracer,
 			NULL, &tracing_saved_cmdlines_fops);
 
-	trace_create_file("trace_clock", 0644, d_tracer, NULL,
-			  &trace_clock_fops);
-
-	trace_create_file("tracing_on", 0644, d_tracer,
-			    &global_trace, &rb_simple_fops);
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -5085,13 +5171,13 @@ static __init int tracer_init_debugfs(void)
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 	trace_create_file("snapshot", 0644, d_tracer,
-			  (void *) RING_BUFFER_ALL_CPUS, &snapshot_fops);
+			  (void *)&global_trace.trace_cpu, &snapshot_fops);
 #endif
 
-	create_trace_options_dir();
+	create_trace_options_dir(&global_trace);
 
 	for_each_tracing_cpu(cpu)
-		tracing_init_debugfs_percpu(cpu);
+		tracing_init_debugfs_percpu(&global_trace, cpu);
 
 	return 0;
 }
@@ -5161,7 +5247,7 @@ trace_printk_seq(struct trace_seq *s)
 void trace_init_global_iter(struct trace_iterator *iter)
 {
 	iter->tr = &global_trace;
-	iter->trace = current_trace;
+	iter->trace = iter->tr->current_trace;
 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
 }
 
@@ -5315,6 +5401,8 @@ __init static int tracer_alloc_buffers(void)
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
 
+	raw_spin_lock_init(&global_trace.start_lock);
+
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
 	if (!global_trace.buffer) {
@@ -5328,6 +5416,7 @@ __init static int tracer_alloc_buffers(void)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
+	raw_spin_lock_init(&max_tr.start_lock);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
@@ -5339,7 +5428,11 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
+		global_trace.data[i]->trace_cpu.cpu = i;
+		global_trace.data[i]->trace_cpu.tr = &global_trace;
 		max_tr.data[i] = &per_cpu(max_tr_data, i);
+		max_tr.data[i]->trace_cpu.cpu = i;
+		max_tr.data[i]->trace_cpu.tr = &max_tr;
 	}
 
 	set_buffer_entries(&global_trace,
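[Editor's note: the back-pointers set above assume a small descriptor embedded in each per-cpu data structure. A minimal sketch of its assumed shape; the authoritative definition lives in kernel/trace/trace.h and may carry additional fields.]

struct trace_cpu {
	struct trace_array	*tr;	/* owning trace array */
	struct dentry		*dir;	/* per-cpu debugfs directory, if any */
	int			cpu;	/* cpu index, or RING_BUFFER_ALL_CPUS */
};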
@@ -5353,6 +5446,8 @@ __init static int tracer_alloc_buffers(void)
 
 	register_tracer(&nop_trace);
 
+	global_trace.current_trace = &nop_trace;
+
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 
@@ -5363,6 +5458,10 @@ __init static int tracer_alloc_buffers(void)
 
 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
 
+	/* Holder for file callbacks */
+	global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
+	global_trace.trace_cpu.tr = &global_trace;
+
 	INIT_LIST_HEAD(&global_trace.systems);
 	INIT_LIST_HEAD(&global_trace.events);
 	list_add(&global_trace.list, &ftrace_trace_arrays);
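[Editor's sketch, not part of the patch: the payoff of the holder above is that any file registered with &tr->trace_cpu as its data can recover both the instance and the cpu selector in one dereference. The function name below is hypothetical, for illustration only.]

static int example_tc_open_sketch(struct inode *inode, struct file *filp)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_array *tr = tc->tr;	/* owning instance */
	int cpu = tc->cpu;	/* RING_BUFFER_ALL_CPUS for top-level files */

	filp->private_data = tc;
	(void)tr;
	(void)cpu;
	return 0;
}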
@@ -5371,7 +5470,7 @@ __init static int tracer_alloc_buffers(void)
 		char *option;
 
 		option = strsep(&trace_boot_options, ",");
-		trace_set_options(option);
+		trace_set_options(&global_trace, option);
 	}
 
 	return 0;