|
@@ -16,7 +16,6 @@
|
|
|
|
|
|
#include "trace.h"
|
|
|
|
|
|
-#define CREATE_TRACE_POINTS
|
|
|
#include <trace/events/preemptirq.h>
|
|
|
|
|
|
#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
|
|
@@ -450,66 +449,6 @@ void stop_critical_timings(void)
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(stop_critical_timings);
|
|
|
|
|
|
-#ifdef CONFIG_IRQSOFF_TRACER
|
|
|
-#ifdef CONFIG_PROVE_LOCKING
|
|
|
-void time_hardirqs_on(unsigned long a0, unsigned long a1)
|
|
|
-{
|
|
|
- if (!preempt_trace() && irq_trace())
|
|
|
- stop_critical_timing(a0, a1);
|
|
|
-}
|
|
|
-
|
|
|
-void time_hardirqs_off(unsigned long a0, unsigned long a1)
|
|
|
-{
|
|
|
- if (!preempt_trace() && irq_trace())
|
|
|
- start_critical_timing(a0, a1);
|
|
|
-}
|
|
|
-
|
|
|
-#else /* !CONFIG_PROVE_LOCKING */
|
|
|
-
|
|
|
-/*
|
|
|
- * We are only interested in hardirq on/off events:
|
|
|
- */
|
|
|
-static inline void tracer_hardirqs_on(void)
|
|
|
-{
|
|
|
- if (!preempt_trace() && irq_trace())
|
|
|
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
|
|
|
-}
|
|
|
-
|
|
|
-static inline void tracer_hardirqs_off(void)
|
|
|
-{
|
|
|
- if (!preempt_trace() && irq_trace())
|
|
|
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
|
|
|
-}
|
|
|
-
|
|
|
-static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
|
|
|
-{
|
|
|
- if (!preempt_trace() && irq_trace())
|
|
|
- stop_critical_timing(CALLER_ADDR0, caller_addr);
|
|
|
-}
|
|
|
-
|
|
|
-static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
|
|
|
-{
|
|
|
- if (!preempt_trace() && irq_trace())
|
|
|
- start_critical_timing(CALLER_ADDR0, caller_addr);
|
|
|
-}
|
|
|
-
|
|
|
-#endif /* CONFIG_PROVE_LOCKING */
|
|
|
-#endif /* CONFIG_IRQSOFF_TRACER */
|
|
|
-
|
|
|
-#ifdef CONFIG_PREEMPT_TRACER
|
|
|
-static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
|
|
|
-{
|
|
|
- if (preempt_trace() && !irq_trace())
|
|
|
- stop_critical_timing(a0, a1);
|
|
|
-}
|
|
|
-
|
|
|
-static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
|
|
|
-{
|
|
|
- if (preempt_trace() && !irq_trace())
|
|
|
- start_critical_timing(a0, a1);
|
|
|
-}
|
|
|
-#endif /* CONFIG_PREEMPT_TRACER */
|
|
|
-
|
|
|
#ifdef CONFIG_FUNCTION_TRACER
|
|
|
static bool function_enabled;
|
|
|
|
|
@@ -659,15 +598,34 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
|
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_IRQSOFF_TRACER
|
|
|
+/*
|
|
|
+ * We are only interested in hardirq on/off events:
|
|
|
+ */
|
|
|
+static void tracer_hardirqs_on(void *none, unsigned long a0, unsigned long a1)
|
|
|
+{
|
|
|
+ if (!preempt_trace() && irq_trace())
|
|
|
+ stop_critical_timing(a0, a1);
|
|
|
+}
|
|
|
+
|
|
|
+static void tracer_hardirqs_off(void *none, unsigned long a0, unsigned long a1)
|
|
|
+{
|
|
|
+ if (!preempt_trace() && irq_trace())
|
|
|
+ start_critical_timing(a0, a1);
|
|
|
+}
|
|
|
+
|
|
|
static int irqsoff_tracer_init(struct trace_array *tr)
|
|
|
{
|
|
|
trace_type = TRACER_IRQS_OFF;
|
|
|
|
|
|
+ register_trace_irq_disable(tracer_hardirqs_off, NULL);
|
|
|
+ register_trace_irq_enable(tracer_hardirqs_on, NULL);
|
|
|
return __irqsoff_tracer_init(tr);
|
|
|
}
|
|
|
|
|
|
static void irqsoff_tracer_reset(struct trace_array *tr)
|
|
|
{
|
|
|
+ unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
|
|
|
+ unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
|
|
|
__irqsoff_tracer_reset(tr);
|
|
|
}
|
|
|
|
|
@@ -690,21 +648,34 @@ static struct tracer irqsoff_tracer __read_mostly =
|
|
|
.allow_instances = true,
|
|
|
.use_max_tr = true,
|
|
|
};
|
|
|
-# define register_irqsoff(trace) register_tracer(&trace)
|
|
|
-#else
|
|
|
-# define register_irqsoff(trace) do { } while (0)
|
|
|
-#endif
|
|
|
+#endif /* CONFIG_IRQSOFF_TRACER */
|
|
|
|
|
|
#ifdef CONFIG_PREEMPT_TRACER
|
|
|
+static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1)
|
|
|
+{
|
|
|
+ if (preempt_trace() && !irq_trace())
|
|
|
+ stop_critical_timing(a0, a1);
|
|
|
+}
|
|
|
+
|
|
|
+static void tracer_preempt_off(void *none, unsigned long a0, unsigned long a1)
|
|
|
+{
|
|
|
+ if (preempt_trace() && !irq_trace())
|
|
|
+ start_critical_timing(a0, a1);
|
|
|
+}
|
|
|
+
|
|
|
static int preemptoff_tracer_init(struct trace_array *tr)
|
|
|
{
|
|
|
trace_type = TRACER_PREEMPT_OFF;
|
|
|
|
|
|
+ register_trace_preempt_disable(tracer_preempt_off, NULL);
|
|
|
+ register_trace_preempt_enable(tracer_preempt_on, NULL);
|
|
|
return __irqsoff_tracer_init(tr);
|
|
|
}
|
|
|
|
|
|
static void preemptoff_tracer_reset(struct trace_array *tr)
|
|
|
{
|
|
|
+ unregister_trace_preempt_disable(tracer_preempt_off, NULL);
|
|
|
+ unregister_trace_preempt_enable(tracer_preempt_on, NULL);
|
|
|
__irqsoff_tracer_reset(tr);
|
|
|
}
|
|
|
|
|
@@ -727,23 +698,29 @@ static struct tracer preemptoff_tracer __read_mostly =
|
|
|
.allow_instances = true,
|
|
|
.use_max_tr = true,
|
|
|
};
|
|
|
-# define register_preemptoff(trace) register_tracer(&trace)
|
|
|
-#else
|
|
|
-# define register_preemptoff(trace) do { } while (0)
|
|
|
-#endif
|
|
|
+#endif /* CONFIG_PREEMPT_TRACER */
|
|
|
|
|
|
-#if defined(CONFIG_IRQSOFF_TRACER) && \
|
|
|
- defined(CONFIG_PREEMPT_TRACER)
|
|
|
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
|
|
|
|
|
|
static int preemptirqsoff_tracer_init(struct trace_array *tr)
|
|
|
{
|
|
|
trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
|
|
|
|
|
|
+ register_trace_irq_disable(tracer_hardirqs_off, NULL);
|
|
|
+ register_trace_irq_enable(tracer_hardirqs_on, NULL);
|
|
|
+ register_trace_preempt_disable(tracer_preempt_off, NULL);
|
|
|
+ register_trace_preempt_enable(tracer_preempt_on, NULL);
|
|
|
+
|
|
|
return __irqsoff_tracer_init(tr);
|
|
|
}
|
|
|
|
|
|
static void preemptirqsoff_tracer_reset(struct trace_array *tr)
|
|
|
{
|
|
|
+ unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
|
|
|
+ unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
|
|
|
+ unregister_trace_preempt_disable(tracer_preempt_off, NULL);
|
|
|
+ unregister_trace_preempt_enable(tracer_preempt_on, NULL);
|
|
|
+
|
|
|
__irqsoff_tracer_reset(tr);
|
|
|
}
|
|
|
|
|
@@ -766,115 +743,21 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
|
|
|
.allow_instances = true,
|
|
|
.use_max_tr = true,
|
|
|
};
|
|
|
-
|
|
|
-# define register_preemptirqsoff(trace) register_tracer(&trace)
|
|
|
-#else
|
|
|
-# define register_preemptirqsoff(trace) do { } while (0)
|
|
|
#endif
|
|
|
|
|
|
__init static int init_irqsoff_tracer(void)
|
|
|
{
|
|
|
- register_irqsoff(irqsoff_tracer);
|
|
|
- register_preemptoff(preemptoff_tracer);
|
|
|
- register_preemptirqsoff(preemptirqsoff_tracer);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-core_initcall(init_irqsoff_tracer);
|
|
|
-#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
|
|
|
-
|
|
|
-#ifndef CONFIG_IRQSOFF_TRACER
|
|
|
-static inline void tracer_hardirqs_on(void) { }
|
|
|
-static inline void tracer_hardirqs_off(void) { }
|
|
|
-static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
|
|
|
-static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
|
|
|
+#ifdef CONFIG_IRQSOFF_TRACER
|
|
|
+ register_tracer(&irqsoff_tracer);
|
|
|
#endif
|
|
|
-
|
|
|
-#ifndef CONFIG_PREEMPT_TRACER
|
|
|
-static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
|
|
|
-static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
|
|
|
+#ifdef CONFIG_PREEMPT_TRACER
|
|
|
+ register_tracer(&preemptoff_tracer);
|
|
|
#endif
|
|
|
-
|
|
|
-#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
|
|
|
-/* Per-cpu variable to prevent redundant calls when IRQs already off */
|
|
|
-static DEFINE_PER_CPU(int, tracing_irq_cpu);
|
|
|
-
|
|
|
-void trace_hardirqs_on(void)
|
|
|
-{
|
|
|
- if (!this_cpu_read(tracing_irq_cpu))
|
|
|
- return;
|
|
|
-
|
|
|
- trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
|
|
|
- tracer_hardirqs_on();
|
|
|
-
|
|
|
- this_cpu_write(tracing_irq_cpu, 0);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(trace_hardirqs_on);
|
|
|
-
|
|
|
-void trace_hardirqs_off(void)
|
|
|
-{
|
|
|
- if (this_cpu_read(tracing_irq_cpu))
|
|
|
- return;
|
|
|
-
|
|
|
- this_cpu_write(tracing_irq_cpu, 1);
|
|
|
-
|
|
|
- trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
|
|
|
- tracer_hardirqs_off();
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(trace_hardirqs_off);
|
|
|
-
|
|
|
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
|
|
|
-{
|
|
|
- if (!this_cpu_read(tracing_irq_cpu))
|
|
|
- return;
|
|
|
-
|
|
|
- trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
|
|
|
- tracer_hardirqs_on_caller(caller_addr);
|
|
|
-
|
|
|
- this_cpu_write(tracing_irq_cpu, 0);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
|
|
|
-
|
|
|
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
|
|
|
-{
|
|
|
- if (this_cpu_read(tracing_irq_cpu))
|
|
|
- return;
|
|
|
-
|
|
|
- this_cpu_write(tracing_irq_cpu, 1);
|
|
|
-
|
|
|
- trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
|
|
|
- tracer_hardirqs_off_caller(caller_addr);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
|
|
|
-
|
|
|
-/*
|
|
|
- * Stubs:
|
|
|
- */
|
|
|
-
|
|
|
-void trace_softirqs_on(unsigned long ip)
|
|
|
-{
|
|
|
-}
|
|
|
-
|
|
|
-void trace_softirqs_off(unsigned long ip)
|
|
|
-{
|
|
|
-}
|
|
|
-
|
|
|
-inline void print_irqtrace_events(struct task_struct *curr)
|
|
|
-{
|
|
|
-}
|
|
|
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
|
|
|
+ register_tracer(&preemptirqsoff_tracer);
|
|
|
#endif
|
|
|
|
|
|
-#if defined(CONFIG_PREEMPT_TRACER) || \
|
|
|
- (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
|
|
|
-void trace_preempt_on(unsigned long a0, unsigned long a1)
|
|
|
-{
|
|
|
- trace_preempt_enable_rcuidle(a0, a1);
|
|
|
- tracer_preempt_on(a0, a1);
|
|
|
-}
|
|
|
-
|
|
|
-void trace_preempt_off(unsigned long a0, unsigned long a1)
|
|
|
-{
|
|
|
- trace_preempt_disable_rcuidle(a0, a1);
|
|
|
- tracer_preempt_off(a0, a1);
|
|
|
+ return 0;
|
|
|
}
|
|
|
-#endif
|
|
|
+core_initcall(init_irqsoff_tracer);
|
|
|
+#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */
|