@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * trace irqs off critical timings
  *
@@ -16,7 +17,6 @@
 #include "trace.h"
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
 
 #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 static struct trace_array		*irqsoff_trace __read_mostly;
@@ -41,12 +41,12 @@ static int start_irqsoff_tracer(struct trace_array *tr, int graph);
 
 #ifdef CONFIG_PREEMPT_TRACER
 static inline int
-preempt_trace(void)
+preempt_trace(int pc)
 {
-	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
+	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
 }
 #else
-# define preempt_trace() (0)
+# define preempt_trace(pc) (0)
 #endif
 
 #ifdef CONFIG_IRQSOFF_TRACER
@@ -367,7 +367,7 @@ out:
 }
 
 static inline void
-start_critical_timing(unsigned long ip, unsigned long parent_ip)
+start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
@@ -395,7 +395,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	local_save_flags(flags);
 
-	__trace_function(tr, ip, parent_ip, flags, preempt_count());
+	__trace_function(tr, ip, parent_ip, flags, pc);
 
 	per_cpu(tracing_cpu, cpu) = 1;
 
@@ -403,7 +403,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 }
 
 static inline void
-stop_critical_timing(unsigned long ip, unsigned long parent_ip)
+stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
 	struct trace_array *tr = irqsoff_trace;
@@ -429,7 +429,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_inc(&data->disabled);
 
 	local_save_flags(flags);
-	__trace_function(tr, ip, parent_ip, flags, preempt_count());
+	__trace_function(tr, ip, parent_ip, flags, pc);
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
@@ -438,77 +438,21 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 /* start and stop critical timings used to for stoppage (in idle) */
 void start_critical_timings(void)
 {
-	if (preempt_trace() || irq_trace())
-		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) || irq_trace())
+		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
-	if (preempt_trace() || irq_trace())
-		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
-EXPORT_SYMBOL_GPL(stop_critical_timings);
-
-#ifdef CONFIG_IRQSOFF_TRACER
-#ifdef CONFIG_PROVE_LOCKING
-void time_hardirqs_on(unsigned long a0, unsigned long a1)
-{
-	if (!preempt_trace() && irq_trace())
-		stop_critical_timing(a0, a1);
-}
-
-void time_hardirqs_off(unsigned long a0, unsigned long a1)
-{
-	if (!preempt_trace() && irq_trace())
-		start_critical_timing(a0, a1);
-}
-
-#else /* !CONFIG_PROVE_LOCKING */
-
-/*
- * We are only interested in hardirq on/off events:
- */
-static inline void tracer_hardirqs_on(void)
-{
-	if (!preempt_trace() && irq_trace())
-		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
+	int pc = preempt_count();
 
-static inline void tracer_hardirqs_off(void)
-{
-	if (!preempt_trace() && irq_trace())
-		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-}
-
-static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
-{
-	if (!preempt_trace() && irq_trace())
-		stop_critical_timing(CALLER_ADDR0, caller_addr);
-}
-
-static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
-{
-	if (!preempt_trace() && irq_trace())
-		start_critical_timing(CALLER_ADDR0, caller_addr);
-}
-
-#endif /* CONFIG_PROVE_LOCKING */
-#endif /* CONFIG_IRQSOFF_TRACER */
-
-#ifdef CONFIG_PREEMPT_TRACER
-static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
-{
-	if (preempt_trace() && !irq_trace())
-		stop_critical_timing(a0, a1);
-}
-
-static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
-{
-	if (preempt_trace() && !irq_trace())
-		start_critical_timing(a0, a1);
+	if (preempt_trace(pc) || irq_trace())
+		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
-#endif /* CONFIG_PREEMPT_TRACER */
+EXPORT_SYMBOL_GPL(stop_critical_timings);
 
 #ifdef CONFIG_FUNCTION_TRACER
 static bool function_enabled;
@@ -634,7 +578,7 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
 	return 0;
 }
 
-static void irqsoff_tracer_reset(struct trace_array *tr)
+static void __irqsoff_tracer_reset(struct trace_array *tr)
 {
 	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
 	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
@@ -659,12 +603,37 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * We are only interested in hardirq on/off events:
+ */
+void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
+{
+	unsigned int pc = preempt_count();
+
+	if (!preempt_trace(pc) && irq_trace())
+		stop_critical_timing(a0, a1, pc);
+}
+
+void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
+{
+	unsigned int pc = preempt_count();
+
+	if (!preempt_trace(pc) && irq_trace())
+		start_critical_timing(a0, a1, pc);
+}
+
 static int irqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF;
 
 	return __irqsoff_tracer_init(tr);
 }
+
+static void irqsoff_tracer_reset(struct trace_array *tr)
+{
+	__irqsoff_tracer_reset(tr);
+}
+
 static struct tracer irqsoff_tracer __read_mostly =
 {
 	.name		= "irqsoff",
@@ -684,12 +653,25 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.allow_instances = true,
 	.use_max_tr	= true,
 };
-# define register_irqsoff(trace) register_tracer(&trace)
-#else
-# define register_irqsoff(trace) do { } while (0)
-#endif
+#endif /* CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
+void tracer_preempt_on(unsigned long a0, unsigned long a1)
+{
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) && !irq_trace())
+		stop_critical_timing(a0, a1, pc);
+}
+
+void tracer_preempt_off(unsigned long a0, unsigned long a1)
+{
+	int pc = preempt_count();
+
+	if (preempt_trace(pc) && !irq_trace())
+		start_critical_timing(a0, a1, pc);
+}
+
 static int preemptoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_PREEMPT_OFF;
@@ -697,11 +679,16 @@ static int preemptoff_tracer_init(struct trace_array *tr)
 	return __irqsoff_tracer_init(tr);
 }
 
+static void preemptoff_tracer_reset(struct trace_array *tr)
+{
+	__irqsoff_tracer_reset(tr);
+}
+
 static struct tracer preemptoff_tracer __read_mostly =
 {
 	.name		= "preemptoff",
 	.init		= preemptoff_tracer_init,
-	.reset		= irqsoff_tracer_reset,
+	.reset		= preemptoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
 	.print_max	= true,
@@ -716,13 +703,9 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.allow_instances = true,
 	.use_max_tr	= true,
 };
-# define register_preemptoff(trace) register_tracer(&trace)
-#else
-# define register_preemptoff(trace) do { } while (0)
-#endif
+#endif /* CONFIG_PREEMPT_TRACER */
 
-#if defined(CONFIG_IRQSOFF_TRACER) && \
-	defined(CONFIG_PREEMPT_TRACER)
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 
 static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
@@ -731,11 +714,16 @@ static int preemptirqsoff_tracer_init(struct trace_array *tr)
 	return __irqsoff_tracer_init(tr);
 }
 
+static void preemptirqsoff_tracer_reset(struct trace_array *tr)
+{
+	__irqsoff_tracer_reset(tr);
+}
+
 static struct tracer preemptirqsoff_tracer __read_mostly =
 {
 	.name		= "preemptirqsoff",
 	.init		= preemptirqsoff_tracer_init,
-	.reset		= irqsoff_tracer_reset,
+	.reset		= preemptirqsoff_tracer_reset,
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
 	.print_max	= true,
@@ -750,115 +738,21 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.allow_instances = true,
 	.use_max_tr	= true,
 };
-
-# define register_preemptirqsoff(trace) register_tracer(&trace)
-#else
-# define register_preemptirqsoff(trace) do { } while (0)
 #endif
 
 __init static int init_irqsoff_tracer(void)
 {
-	register_irqsoff(irqsoff_tracer);
-	register_preemptoff(preemptoff_tracer);
-	register_preemptirqsoff(preemptirqsoff_tracer);
-
-	return 0;
-}
-core_initcall(init_irqsoff_tracer);
-#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
-
-#ifndef CONFIG_IRQSOFF_TRACER
-static inline void tracer_hardirqs_on(void) { }
-static inline void tracer_hardirqs_off(void) { }
-static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
-static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#ifdef CONFIG_IRQSOFF_TRACER
+	register_tracer(&irqsoff_tracer);
 #endif
-
-#ifndef CONFIG_PREEMPT_TRACER
-static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
-static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#ifdef CONFIG_PREEMPT_TRACER
+	register_tracer(&preemptoff_tracer);
 #endif
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
-/* Per-cpu variable to prevent redundant calls when IRQs already off */
-static DEFINE_PER_CPU(int, tracing_irq_cpu);
-
-void trace_hardirqs_on(void)
-{
-	if (!this_cpu_read(tracing_irq_cpu))
-		return;
-
-	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
-	tracer_hardirqs_on();
-
-	this_cpu_write(tracing_irq_cpu, 0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on);
-
-void trace_hardirqs_off(void)
-{
-	if (this_cpu_read(tracing_irq_cpu))
-		return;
-
-	this_cpu_write(tracing_irq_cpu, 1);
-
-	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
-	tracer_hardirqs_off();
-}
-EXPORT_SYMBOL(trace_hardirqs_off);
-
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
-{
-	if (!this_cpu_read(tracing_irq_cpu))
-		return;
-
-	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
-	tracer_hardirqs_on_caller(caller_addr);
-
-	this_cpu_write(tracing_irq_cpu, 0);
-}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
-{
-	if (this_cpu_read(tracing_irq_cpu))
-		return;
-
-	this_cpu_write(tracing_irq_cpu, 1);
-
-	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
-	tracer_hardirqs_off_caller(caller_addr);
-}
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
-/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
+#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
+	register_tracer(&preemptirqsoff_tracer);
 #endif
 
-#if defined(CONFIG_PREEMPT_TRACER) || \
-	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
-void trace_preempt_on(unsigned long a0, unsigned long a1)
-{
-	trace_preempt_enable_rcuidle(a0, a1);
-	tracer_preempt_on(a0, a1);
-}
-
-void trace_preempt_off(unsigned long a0, unsigned long a1)
-{
-	trace_preempt_disable_rcuidle(a0, a1);
-	tracer_preempt_off(a0, a1);
+	return 0;
 }
-#endif
+core_initcall(init_irqsoff_tracer);
+#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
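
The recurring change in this patch is a single idiom: sample preempt_count() once at the outermost entry point and pass the value down as an explicit "pc" argument, so the tracepoint test (preempt_trace(pc)) and the record emitted by __trace_function() agree on one snapshot of the count. Below is a minimal standalone sketch of that idiom; every sketch_* name is an illustrative stand-in, not a kernel API.

#include <stdio.h>

/* Stand-in for the kernel's preempt_count(); fixed value for the demo. */
static int sketch_preempt_count(void)
{
	return 1;
}

/* Consumes the caller's snapshot instead of re-reading the count, which
 * is what passing "pc" into __trace_function() achieves in the patch. */
static void sketch_record(const char *what, unsigned long ip, int pc)
{
	printf("%s: ip=%#lx pc=%d\n", what, ip, pc);
}

static void sketch_stop_critical_timing(unsigned long ip, int pc)
{
	sketch_record("stop_critical_timing", ip, pc);
}

/* Mirrors the shape of tracer_hardirqs_on() above: capture pc exactly
 * once, then reuse the same snapshot for every downstream consumer. */
static void sketch_tracer_hardirqs_on(unsigned long ip)
{
	int pc = sketch_preempt_count();

	sketch_record("tracepoint", ip, pc);
	sketch_stop_critical_timing(ip, pc);
}

int main(void)
{
	sketch_tracer_hardirqs_on(0x1000);
	return 0;
}

The same shape appears in tracer_hardirqs_on/off() and tracer_preempt_on/off() in the patch: pc is captured once per event and forwarded to start_critical_timing()/stop_critical_timing(), rather than re-read inside each helper.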