@@ -1085,7 +1085,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static void default_wait_pipe(struct trace_iterator *iter)
+static void wait_on_pipe(struct trace_iterator *iter)
 {
 	/* Iterators are static, they should be filled or empty */
 	if (trace_buffer_iter(iter, iter->cpu_file))
@@ -1202,8 +1202,6 @@ int register_tracer(struct tracer *type)
 	else
 		if (!type->flags->opts)
 			type->flags->opts = dummy_tracer_opt;
-	if (!type->wait_pipe)
-		type->wait_pipe = default_wait_pipe;
 
 	ret = run_tracer_selftest(type);
 	if (ret < 0)
@@ -4207,25 +4205,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	return trace_poll(iter, filp, poll_table);
 }
 
-/*
- * This is a make-shift waitqueue.
- * A tracer might use this callback on some rare cases:
- *
- *  1) the current tracer might hold the runqueue lock when it wakes up
- *     a reader, hence a deadlock (sched, function, and function graph tracers)
- *  2) the function tracers, trace all functions, we don't want
- *     the overhead of calling wake_up and friends
- *     (and tracing them too)
- *
- *  Anyway, this is really very primitive wakeup.
- */
-void poll_wait_pipe(struct trace_iterator *iter)
-{
-	set_current_state(TASK_INTERRUPTIBLE);
-	/* sleep for 100 msecs, and try again. */
-	schedule_timeout(HZ / 10);
-}
-
 /* Must be called with trace_types_lock mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
@@ -4251,7 +4230,7 @@ static int tracing_wait_pipe(struct file *filp)
 
 		mutex_unlock(&iter->mutex);
 
-		iter->trace->wait_pipe(iter);
+		wait_on_pipe(iter);
 
 		mutex_lock(&iter->mutex);
 
@@ -5179,7 +5158,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 				goto out_unlock;
 			}
 			mutex_unlock(&trace_types_lock);
-			iter->trace->wait_pipe(iter);
+			wait_on_pipe(iter);
 			mutex_lock(&trace_types_lock);
 			if (signal_pending(current)) {
 				size = -EINTR;
@@ -5390,7 +5369,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			goto out;
 		}
 		mutex_unlock(&trace_types_lock);
-		iter->trace->wait_pipe(iter);
+		wait_on_pipe(iter);
 		mutex_lock(&trace_types_lock);
 		if (signal_pending(current)) {
 			ret = -EINTR;