@@ -213,14 +213,52 @@ EXPORT_SYMBOL(local_bh_enable_ip);
 #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
 #define MAX_SOFTIRQ_RESTART 10
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * Convoluted means of passing __do_softirq() a message through the various
+ * architecture execute_on_stack() bits.
+ *
+ * When we run softirqs from irq_exit() and thus on the hardirq stack we need
+ * to keep the lockdep irq context tracking as tight as possible in order to
+ * not mis-qualify lock contexts and miss possible deadlocks.
+ */
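+/* Nonzero when the softirqs about to run were invoked from hardirq context. */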
+static DEFINE_PER_CPU(int, softirq_from_hardirq);
+
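+/* Called from invoke_softirq() so __do_softirq() knows it runs off a hardirq. */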
+static inline void lockdep_softirq_from_hardirq(void)
+{
+	this_cpu_write(softirq_from_hardirq, 1);
+}
+
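+/*
+ * Enter lockdep's softirq context; if we got here from a hardirq, leave
+ * that context first so lock usage is qualified correctly.
+ */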
+static inline void lockdep_softirq_start(void)
+{
+	if (this_cpu_read(softirq_from_hardirq))
+		trace_hardirq_exit();
+	lockdep_softirq_enter();
+}
+
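+/*
+ * Undo lockdep_softirq_start(): leave softirq context, clear the per-CPU
+ * flag and re-enter the interrupted hardirq context if there was one.
+ */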
+static inline void lockdep_softirq_end(void)
+{
+	lockdep_softirq_exit();
+	if (this_cpu_read(softirq_from_hardirq)) {
+		this_cpu_write(softirq_from_hardirq, 0);
+		trace_hardirq_enter();
+	}
+}
+
+#else
+static inline void lockdep_softirq_from_hardirq(void) { }
+static inline void lockdep_softirq_start(void) { }
+static inline void lockdep_softirq_end(void) { }
+#endif
+
 asmlinkage void __do_softirq(void)
 {
-	struct softirq_action *h;
-	__u32 pending;
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
-	int cpu;
 	unsigned long old_flags = current->flags;
 	int max_restart = MAX_SOFTIRQ_RESTART;
+	struct softirq_action *h;
+	__u32 pending;
+	int cpu;
 
 	/*
 	 * Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -233,7 +271,7 @@ asmlinkage void __do_softirq(void)
 	account_irq_enter_time(current);
 
 	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
-	lockdep_softirq_enter();
+	lockdep_softirq_start();
 
 	cpu = smp_processor_id();
 restart:
@@ -280,16 +318,13 @@ restart:
 		wakeup_softirqd();
 	}
 
-	lockdep_softirq_exit();
-
+	lockdep_softirq_end();
 	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
 	WARN_ON_ONCE(in_interrupt());
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-
-
 asmlinkage void do_softirq(void)
 {
 	__u32 pending;
@@ -332,6 +367,7 @@ void irq_enter(void)
 static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads) {
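+		/* Tell __do_softirq() that it runs off a hardirq. */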
+		lockdep_softirq_from_hardirq();
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
@@ -377,13 +413,13 @@ void irq_exit(void)
 #endif
 
 	account_irq_exit_time(current);
-	trace_hardirq_exit();
 	preempt_count_sub(HARDIRQ_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
 	tick_irq_exit();
 	rcu_irq_exit();
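+	/*
+	 * Leave the lockdep hardirq context only now, so everything above,
+	 * including invoke_softirq(), is still qualified as hardirq.
+	 */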
+	trace_hardirq_exit(); /* must be last! */
 }
 
 /*