@@ -275,7 +275,7 @@ int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
 }
 EXPORT_SYMBOL_GPL(call_filter_check_discard);

-cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
+static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
 	u64 ts;

@@ -599,7 +599,7 @@ static int alloc_snapshot(struct trace_array *tr)
 	return 0;
 }

-void free_snapshot(struct trace_array *tr)
+static void free_snapshot(struct trace_array *tr)
 {
 	/*
 	 * We don't free the ring buffer. instead, resize it because
@@ -963,27 +963,9 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }

-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a arch_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- *
- * It is also used in other places outside the update_max_tr
- * so it needs to be defined outside of the
- * CONFIG_TRACER_MAX_TRACE.
- */
-static arch_spinlock_t ftrace_max_lock =
-	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-
 unsigned long __read_mostly tracing_thresh;

 #ifdef CONFIG_TRACER_MAX_TRACE
-unsigned long __read_mostly tracing_max_latency;
-
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
@@ -1000,7 +982,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	max_buf->cpu = cpu;
 	max_buf->time_start = data->preempt_timestamp;

-	max_data->saved_latency = tracing_max_latency;
+	max_data->saved_latency = tr->max_latency;
 	max_data->critical_start = data->critical_start;
 	max_data->critical_end = data->critical_end;

@@ -1048,14 +1030,14 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 	}

-	arch_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&tr->max_lock);

 	buf = tr->trace_buffer.buffer;
 	tr->trace_buffer.buffer = tr->max_buffer.buffer;
 	tr->max_buffer.buffer = buf;

 	__update_max_tr(tr, tsk, cpu);
-	arch_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&tr->max_lock);
 }

 /**
@@ -1081,7 +1063,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 	}

-	arch_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&tr->max_lock);

 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

@@ -1099,11 +1081,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

 	__update_max_tr(tr, tsk, cpu);
-	arch_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&tr->max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */

-static void default_wait_pipe(struct trace_iterator *iter)
+static void wait_on_pipe(struct trace_iterator *iter)
 {
 	/* Iterators are static, they should be filled or empty */
 	if (trace_buffer_iter(iter, iter->cpu_file))
@@ -1220,8 +1202,6 @@ int register_tracer(struct tracer *type)
 		else
 			if (!type->flags->opts)
 				type->flags->opts = dummy_tracer_opt;
-	if (!type->wait_pipe)
-		type->wait_pipe = default_wait_pipe;

 	ret = run_tracer_selftest(type);
 	if (ret < 0)
@@ -1305,22 +1285,71 @@ void tracing_reset_all_online_cpus(void)
 	}
 }

-#define SAVED_CMDLINES 128
+#define SAVED_CMDLINES_DEFAULT 128
 #define NO_CMDLINE_MAP UINT_MAX
-static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
-static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
-static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
-static int cmdline_idx;
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+struct saved_cmdlines_buffer {
+	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
+	unsigned *map_cmdline_to_pid;
+	unsigned cmdline_num;
+	int cmdline_idx;
+	char *saved_cmdlines;
+};
+static struct saved_cmdlines_buffer *savedcmd;

 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;

-static void trace_init_cmdlines(void)
+static inline char *get_saved_cmdlines(int idx)
 {
-	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
-	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
-	cmdline_idx = 0;
+	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
+}
+
+static inline void set_cmdline(int idx, const char *cmdline)
+{
+	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
+}
+
+static int allocate_cmdlines_buffer(unsigned int val,
+				    struct saved_cmdlines_buffer *s)
+{
+	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
+					GFP_KERNEL);
+	if (!s->map_cmdline_to_pid)
+		return -ENOMEM;
+
+	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
+	if (!s->saved_cmdlines) {
+		kfree(s->map_cmdline_to_pid);
+		return -ENOMEM;
+	}
+
+	s->cmdline_idx = 0;
+	s->cmdline_num = val;
+	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
+	       sizeof(s->map_pid_to_cmdline));
+	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
+	       val * sizeof(*s->map_cmdline_to_pid));
+
+	return 0;
+}
+
+static int trace_create_savedcmd(void)
+{
+	int ret;
+
+	savedcmd = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+	if (!savedcmd)
+		return -ENOMEM;
+
+	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
+	if (ret < 0) {
+		kfree(savedcmd);
+		savedcmd = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
 }

 int is_tracing_stopped(void)
@@ -1353,7 +1382,7 @@ void tracing_start(void)
 	}

 	/* Prevent the buffers from switching */
-	arch_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&global_trace.max_lock);

 	buffer = global_trace.trace_buffer.buffer;
 	if (buffer)
@@ -1365,7 +1394,7 @@ void tracing_start(void)
 		ring_buffer_record_enable(buffer);
 #endif

-	arch_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&global_trace.max_lock);

 	ftrace_start();
  out:
@@ -1420,7 +1449,7 @@ void tracing_stop(void)
 		goto out;

 	/* Prevent the buffers from switching */
-	arch_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&global_trace.max_lock);

 	buffer = global_trace.trace_buffer.buffer;
 	if (buffer)
@@ -1432,7 +1461,7 @@ void tracing_stop(void)
 		ring_buffer_record_disable(buffer);
 #endif

-	arch_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&global_trace.max_lock);

  out:
 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
@@ -1461,12 +1490,12 @@ static void tracing_stop_tr(struct trace_array *tr)

 void trace_stop_cmdline_recording(void);

-static void trace_save_cmdline(struct task_struct *tsk)
+static int trace_save_cmdline(struct task_struct *tsk)
 {
 	unsigned pid, idx;

 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
-		return;
+		return 0;

 	/*
 	 * It's not the end of the world if we don't get
@@ -1475,11 +1504,11 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * so if we miss here, then better luck next time.
 	 */
 	if (!arch_spin_trylock(&trace_cmdline_lock))
-		return;
+		return 0;

-	idx = map_pid_to_cmdline[tsk->pid];
+	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
 	if (idx == NO_CMDLINE_MAP) {
-		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
+		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

 		/*
 		 * Check whether the cmdline buffer at idx has a pid
@@ -1487,22 +1516,24 @@ static void trace_save_cmdline(struct task_struct *tsk)
 		 * need to clear the map_pid_to_cmdline. Otherwise we
 		 * would read the new comm for the old pid.
 		 */
-		pid = map_cmdline_to_pid[idx];
+		pid = savedcmd->map_cmdline_to_pid[idx];
 		if (pid != NO_CMDLINE_MAP)
-			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
+			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

-		map_cmdline_to_pid[idx] = tsk->pid;
-		map_pid_to_cmdline[tsk->pid] = idx;
+		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
+		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

-		cmdline_idx = idx;
+		savedcmd->cmdline_idx = idx;
 	}

-	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+	set_cmdline(idx, tsk->comm);

 	arch_spin_unlock(&trace_cmdline_lock);
+
+	return 1;
 }

-void trace_find_cmdline(int pid, char comm[])
+static void __trace_find_cmdline(int pid, char comm[])
 {
 	unsigned map;

@@ -1521,13 +1552,19 @@ void trace_find_cmdline(int pid, char comm[])
 		return;
 	}

-	preempt_disable();
-	arch_spin_lock(&trace_cmdline_lock);
-	map = map_pid_to_cmdline[pid];
+	map = savedcmd->map_pid_to_cmdline[pid];
 	if (map != NO_CMDLINE_MAP)
-		strcpy(comm, saved_cmdlines[map]);
+		strcpy(comm, get_saved_cmdlines(map));
 	else
 		strcpy(comm, "<...>");
+}
+
+void trace_find_cmdline(int pid, char comm[])
+{
+	preempt_disable();
+	arch_spin_lock(&trace_cmdline_lock);
+
+	__trace_find_cmdline(pid, comm);

 	arch_spin_unlock(&trace_cmdline_lock);
 	preempt_enable();
@@ -1541,9 +1578,8 @@ void tracing_record_cmdline(struct task_struct *tsk)
 	if (!__this_cpu_read(trace_cmdline_save))
 		return;

-	__this_cpu_write(trace_cmdline_save, false);
-
-	trace_save_cmdline(tsk);
+	if (trace_save_cmdline(tsk))
+		__this_cpu_write(trace_cmdline_save, false);
 }

 void
@@ -1746,7 +1782,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	 */
 	barrier();
 	if (use_stack == 1) {
-		trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
+		trace.entries = this_cpu_ptr(ftrace_stack.calls);
 		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

 		if (regs)
@@ -1995,7 +2031,21 @@ void trace_printk_init_buffers(void)
 	if (alloc_percpu_trace_buffer())
 		return;

-	pr_info("ftrace: Allocated trace_printk buffers\n");
+	/* trace_printk() is for debug use only. Don't use it in production. */
+
+	pr_warning("\n**********************************************************\n");
+	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+	pr_warning("**                                                      **\n");
+	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
+	pr_warning("**                                                      **\n");
+	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
+	pr_warning("** unsafe for production use.                           **\n");
+	pr_warning("**                                                      **\n");
+	pr_warning("** If you see this message and you are not debugging    **\n");
+	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
+	pr_warning("**                                                      **\n");
+	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+	pr_warning("**********************************************************\n");

 	/* Expand the buffers to set size */
 	tracing_update_buffers();
@@ -3333,7 +3383,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	mutex_lock(&tracing_cpumask_update_lock);

 	local_irq_disable();
-	arch_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&tr->max_lock);
 	for_each_tracing_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -3350,7 +3400,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
 		}
 	}
-	arch_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&tr->max_lock);
 	local_irq_enable();

 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
@@ -3592,6 +3642,7 @@ static const char readme_msg[] =
 	"  trace_options\t\t- Set format or modify how tracing happens\n"
 	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
 	"\t\t\t  option name\n"
+	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
 #ifdef CONFIG_DYNAMIC_FTRACE
 	"\n  available_filter_functions - list of functions that can be filtered on\n"
 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
@@ -3705,55 +3756,153 @@ static const struct file_operations tracing_readme_fops = {
 	.llseek		= generic_file_llseek,
 };

+static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	unsigned int *ptr = v;
+
+	if (*pos || m->count)
+		ptr++;
+
+	(*pos)++;
+
+	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
+	     ptr++) {
+		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
+			continue;
+
+		return ptr;
+	}
+
+	return NULL;
+}
+
+static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
+{
+	void *v;
+	loff_t l = 0;
+
+	preempt_disable();
+	arch_spin_lock(&trace_cmdline_lock);
+
+	v = &savedcmd->map_cmdline_to_pid[0];
+	while (l <= *pos) {
+		v = saved_cmdlines_next(m, v, &l);
+		if (!v)
+			return NULL;
+	}
+
+	return v;
+}
+
+static void saved_cmdlines_stop(struct seq_file *m, void *v)
+{
+	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
+}
+
+static int saved_cmdlines_show(struct seq_file *m, void *v)
+{
+	char buf[TASK_COMM_LEN];
+	unsigned int *pid = v;
+
+	__trace_find_cmdline(*pid, buf);
+	seq_printf(m, "%d %s\n", *pid, buf);
+	return 0;
+}
+
+static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
+	.start		= saved_cmdlines_start,
+	.next		= saved_cmdlines_next,
+	.stop		= saved_cmdlines_stop,
+	.show		= saved_cmdlines_show,
+};
+
+static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+
+	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
+}
+
+static const struct file_operations tracing_saved_cmdlines_fops = {
+	.open		= tracing_saved_cmdlines_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 static ssize_t
-tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
-				size_t cnt, loff_t *ppos)
+tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
+				 size_t cnt, loff_t *ppos)
 {
-	char *buf_comm;
-	char *file_buf;
-	char *buf;
-	int len = 0;
-	int pid;
-	int i;
+	char buf[64];
+	int r;
+
+	arch_spin_lock(&trace_cmdline_lock);
+	r = sprintf(buf, "%u\n", savedcmd->cmdline_num);
+	arch_spin_unlock(&trace_cmdline_lock);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+{
+	kfree(s->saved_cmdlines);
+	kfree(s->map_cmdline_to_pid);
+	kfree(s);
+}
+
+static int tracing_resize_saved_cmdlines(unsigned int val)
+{
+	struct saved_cmdlines_buffer *s, *savedcmd_temp;

-	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
-	if (!file_buf)
+	s = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+	if (!s)
 		return -ENOMEM;

-	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
-	if (!buf_comm) {
-		kfree(file_buf);
+	if (allocate_cmdlines_buffer(val, s) < 0) {
+		kfree(s);
 		return -ENOMEM;
 	}

-	buf = file_buf;
+	arch_spin_lock(&trace_cmdline_lock);
+	savedcmd_temp = savedcmd;
+	savedcmd = s;
+	arch_spin_unlock(&trace_cmdline_lock);
+	free_saved_cmdlines_buffer(savedcmd_temp);

-	for (i = 0; i < SAVED_CMDLINES; i++) {
-		int r;
+	return 0;
+}

-		pid = map_cmdline_to_pid[i];
-		if (pid == -1 || pid == NO_CMDLINE_MAP)
-			continue;
+static ssize_t
+tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
+				  size_t cnt, loff_t *ppos)
+{
+	unsigned long val;
+	int ret;

-		trace_find_cmdline(pid, buf_comm);
-		r = sprintf(buf, "%d %s\n", pid, buf_comm);
-		buf += r;
-		len += r;
-	}
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;

-	len = simple_read_from_buffer(ubuf, cnt, ppos,
-				      file_buf, len);
+	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
+	if (!val || val > PID_MAX_DEFAULT)
+		return -EINVAL;

-	kfree(file_buf);
-	kfree(buf_comm);
+	ret = tracing_resize_saved_cmdlines((unsigned int)val);
+	if (ret < 0)
+		return ret;

-	return len;
+	*ppos += cnt;
+
+	return cnt;
 }

-static const struct file_operations tracing_saved_cmdlines_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_saved_cmdlines_read,
-	.llseek		= generic_file_llseek,
+static const struct file_operations tracing_saved_cmdlines_size_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_saved_cmdlines_size_read,
+	.write		= tracing_saved_cmdlines_size_write,
 };

 static ssize_t
@@ -4225,25 +4374,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	return trace_poll(iter, filp, poll_table);
 }

-/*
- * This is a make-shift waitqueue.
- * A tracer might use this callback on some rare cases:
- *
- *  1) the current tracer might hold the runqueue lock when it wakes up
- *     a reader, hence a deadlock (sched, function, and function graph tracers)
- *  2) the function tracers, trace all functions, we don't want
- *     the overhead of calling wake_up and friends
- *     (and tracing them too)
- *
- * Anyway, this is really very primitive wakeup.
- */
-void poll_wait_pipe(struct trace_iterator *iter)
-{
-	set_current_state(TASK_INTERRUPTIBLE);
-	/* sleep for 100 msecs, and try again. */
-	schedule_timeout(HZ / 10);
-}
-
 /* Must be called with trace_types_lock mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
@@ -4255,15 +4385,6 @@ static int tracing_wait_pipe(struct file *filp)
 			return -EAGAIN;
 		}

-		mutex_unlock(&iter->mutex);
-
-		iter->trace->wait_pipe(iter);
-
-		mutex_lock(&iter->mutex);
-
-		if (signal_pending(current))
-			return -EINTR;
-
 		/*
 		 * We block until we read something and tracing is disabled.
 		 * We still block if tracing is disabled, but we have never
@@ -4275,6 +4396,15 @@ static int tracing_wait_pipe(struct file *filp)
 		 */
 		if (!tracing_is_on() && iter->pos)
 			break;
+
+		mutex_unlock(&iter->mutex);
+
+		wait_on_pipe(iter);
+
+		mutex_lock(&iter->mutex);
+
+		if (signal_pending(current))
+			return -EINTR;
 	}

 	return 1;
@@ -5197,7 +5327,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 				goto out_unlock;
 			}
 			mutex_unlock(&trace_types_lock);
-			iter->trace->wait_pipe(iter);
+			wait_on_pipe(iter);
 			mutex_lock(&trace_types_lock);
 			if (signal_pending(current)) {
 				size = -EINTR;
@@ -5408,7 +5538,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			goto out;
 		}
 		mutex_unlock(&trace_types_lock);
-		iter->trace->wait_pipe(iter);
+		wait_on_pipe(iter);
 		mutex_lock(&trace_types_lock);
 		if (signal_pending(current)) {
 			ret = -EINTR;
@@ -6102,6 +6232,25 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	return 0;
 }

+static void free_trace_buffers(struct trace_array *tr)
+{
+	if (!tr)
+		return;
+
+	if (tr->trace_buffer.buffer) {
+		ring_buffer_free(tr->trace_buffer.buffer);
+		tr->trace_buffer.buffer = NULL;
+		free_percpu(tr->trace_buffer.data);
+	}
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+	if (tr->max_buffer.buffer) {
+		ring_buffer_free(tr->max_buffer.buffer);
+		tr->max_buffer.buffer = NULL;
+	}
+#endif
+}
+
 static int new_instance_create(const char *name)
 {
 	struct trace_array *tr;
@@ -6131,6 +6280,8 @@ static int new_instance_create(const char *name)

 	raw_spin_lock_init(&tr->start_lock);

+	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
 	tr->current_trace = &nop_trace;

 	INIT_LIST_HEAD(&tr->systems);
@@ -6158,8 +6309,7 @@ static int new_instance_create(const char *name)
 	return 0;

  out_free_tr:
-	if (tr->trace_buffer.buffer)
-		ring_buffer_free(tr->trace_buffer.buffer);
+	free_trace_buffers(tr);
 	free_cpumask_var(tr->tracing_cpumask);
 	kfree(tr->name);
 	kfree(tr);
@@ -6199,8 +6349,7 @@ static int instance_delete(const char *name)
 	event_trace_del_tracer(tr);
 	ftrace_destroy_function_files(tr);
 	debugfs_remove_recursive(tr->dir);
-	free_percpu(tr->trace_buffer.data);
-	ring_buffer_free(tr->trace_buffer.buffer);
+	free_trace_buffers(tr);

 	kfree(tr->name);
 	kfree(tr);
@@ -6328,6 +6477,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 	trace_create_file("tracing_on", 0644, d_tracer,
 			  tr, &rb_simple_fops);

+#ifdef CONFIG_TRACER_MAX_TRACE
+	trace_create_file("tracing_max_latency", 0644, d_tracer,
+			&tr->max_latency, &tracing_max_lat_fops);
+#endif
+
 	if (ftrace_create_function_files(tr, d_tracer))
 		WARN(1, "Could not allocate function filter files");

@@ -6353,11 +6507,6 @@ static __init int tracer_init_debugfs(void)

 	init_tracer_debugfs(&global_trace, d_tracer);

-#ifdef CONFIG_TRACER_MAX_TRACE
-	trace_create_file("tracing_max_latency", 0644, d_tracer,
-			&tracing_max_latency, &tracing_max_lat_fops);
-#endif
-
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);

@@ -6367,6 +6516,9 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("saved_cmdlines", 0444, d_tracer,
 			NULL, &tracing_saved_cmdlines_fops);

+	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
+			NULL, &tracing_saved_cmdlines_size_fops);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -6603,18 +6755,19 @@ __init static int tracer_alloc_buffers(void)
 	if (!temp_buffer)
 		goto out_free_cpumask;

+	if (trace_create_savedcmd() < 0)
+		goto out_free_temp_buffer;
+
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
 		WARN_ON(1);
-		goto out_free_temp_buffer;
+		goto out_free_savedcmd;
 	}

 	if (global_trace.buffer_disabled)
 		tracing_off();

-	trace_init_cmdlines();
-
 	if (trace_boot_clock) {
 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
 		if (ret < 0)
@@ -6629,6 +6782,10 @@ __init static int tracer_alloc_buffers(void)
 	 */
 	global_trace.current_trace = &nop_trace;

+	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
+	ftrace_init_global_array_ops(&global_trace);
+
 	register_tracer(&nop_trace);

 	/* All seems OK, enable tracing */
@@ -6656,13 +6813,11 @@ __init static int tracer_alloc_buffers(void)

 	return 0;

+out_free_savedcmd:
+	free_saved_cmdlines_buffer(savedcmd);
 out_free_temp_buffer:
 	ring_buffer_free(temp_buffer);
 out_free_cpumask:
-	free_percpu(global_trace.trace_buffer.data);
-#ifdef CONFIG_TRACER_MAX_TRACE
-	free_percpu(global_trace.max_buffer.data);
-#endif
 	free_cpumask_var(global_trace.tracing_cpumask);
 out_free_buffer_mask:
 	free_cpumask_var(tracing_buffer_mask);