
arm64: remove irq_count and do_softirq_own_stack()

sysrq_handle_reboot() re-enables interrupts while on the irq stack. The
irq_stack implementation wrongly assumed this would only ever happen
via the softirq path, allowing it to update irq_count late, in
do_softirq_own_stack().

This means that if an irq occurs in sysrq_handle_reboot() during
emergency_restart(), the stack will be corrupted, as irq_count wasn't
updated.

Lose the optimisation: rather than moving the irq_count increment and
decrement into irq_stack_entry/irq_stack_exit, remove irq_count entirely
and compare sp_el0 (which holds the struct thread_info pointer) with
sp & ~(THREAD_SIZE - 1). If they match, we are on a task stack and can
safely switch to the irq stack. Finally, remove do_softirq_own_stack();
we no longer need it.
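
The new check is equivalent to the following C sketch (illustrative only,
not part of the patch; on arm64 the thread_info sits at the base of the
THREAD_SIZE-aligned task stack, and sp_el0 caches its address):

/* Sketch of the task-stack test; not kernel source. */
static inline bool on_task_stack(unsigned long sp)
{
	/*
	 * Masking off the low bits of sp yields the stack base; it
	 * equals the thread_info address only on a task stack.
	 */
	return (sp & ~(THREAD_SIZE - 1)) ==
		(unsigned long)current_thread_info();
}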

Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use get_thread_info macro]
Signed-off-by: Will Deacon <will.deacon@arm.com>
James Morse
commit d224a69e3d

3 changed files with 11 additions and 48 deletions:
  arch/arm64/include/asm/irq.h | +0  -2
  arch/arm64/kernel/entry.S    | +10 -9
  arch/arm64/kernel/irq.c      | +1  -37

+ 0 - 2
arch/arm64/include/asm/irq.h

@@ -11,8 +11,6 @@
 #include <asm-generic/irq.h>
 #include <asm/thread_info.h>
 
-#define __ARCH_HAS_DO_SOFTIRQ
-
 struct pt_regs;
 
 DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
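
With __ARCH_HAS_DO_SOFTIRQ no longer defined, the generic softirq code
supplies the fallback, which simply runs softirqs on the current stack.
Roughly (simplified from include/linux/interrupt.h of this era):

static inline void do_softirq_own_stack(void)
{
	/* No arch-specific stack switch: process softirqs where we are. */
	__do_softirq();
}

This is safe on arm64 now because a nested el1_irq no longer blindly
switches stacks; it checks sp against the thread_info first.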

+ 10 - 9
arch/arm64/kernel/entry.S

@@ -181,19 +181,20 @@ alternative_endif
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
 
-	this_cpu_ptr irq_stack, x25, x26
-
 	/*
-	 * Check the lowest address on irq_stack for the irq_count value,
-	 * incremented by do_softirq_own_stack if we have re-enabled irqs
-	 * while on the irq_stack.
+	 * Compare sp with the current thread_info, if the top
+	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
+	 * should switch to the irq stack.
 	 */
-	ldr	x26, [x25]
-	cbnz	x26, 9998f		// recursive use?
+	and	x25, x19, #~(THREAD_SIZE - 1)
+	cmp	x25, tsk
+	b.ne	9998f
 
-	/* switch to the irq stack */
+	this_cpu_ptr irq_stack, x25, x26
 	mov	x26, #IRQ_STACK_START_SP
 	add	x26, x25, x26
+
+	/* switch to the irq stack */
 	mov	sp, x26
 
 	/*
@@ -405,10 +406,10 @@ el1_irq:
 	bl	trace_hardirqs_off
 #endif
 
+	get_thread_info tsk
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
-	get_thread_info tsk
 	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
 	cbnz	w24, 1f				// preempt count != 0
 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
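
Note that get_thread_info tsk is hoisted above irq_handler: irq_stack_entry
now compares against tsk, so the register must be valid even when
CONFIG_PREEMPT is not set. The stack selection boils down to this
C-flavoured sketch (hypothetical helper, mirroring the assembly, where x19
holds the original sp):

/* Hypothetical: returns the sp that irq_stack_entry will run on. */
static unsigned long irq_entry_sp(unsigned long sp, unsigned long tsk,
				  unsigned long irq_stack_base)
{
	if ((sp & ~(THREAD_SIZE - 1)) != tsk)
		return sp;				/* nested irq: stay put */
	return irq_stack_base + IRQ_STACK_START_SP;	/* task stack: switch */
}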

+ 1 - 37
arch/arm64/kernel/irq.c

@@ -25,24 +25,14 @@
 #include <linux/irq.h>
 #include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
 #include <linux/irqchip.h>
 #include <linux/seq_file.h>
 
 unsigned long irq_err_count;
 
-/*
- * irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned.
- * irq_stack[0] is used as irq_count, a non-zero value indicates the stack
- * is in use, and el?_irq() shouldn't switch to it. This is used to detect
- * recursive use of the irq_stack, it is lazily updated by
- * do_softirq_own_stack(), which is called on the irq_stack, before
- * re-enabling interrupts to process softirqs.
- */
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
 DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
 
-#define IRQ_COUNT()	(*per_cpu(irq_stack, smp_processor_id()))
-
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	show_ipi_list(p, prec);
@@ -66,29 +56,3 @@ void __init init_IRQ(void)
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
 }
-
-/*
- * do_softirq_own_stack() is called from irq_exit() before __do_softirq()
- * re-enables interrupts, at which point we may re-enter el?_irq(). We
- * increase irq_count here so that el1_irq() knows that it is already on the
- * irq stack.
- *
- * Called with interrupts disabled, so we don't worry about moving cpu, or
- * being interrupted while modifying irq_count.
- *
- * This function doesn't actually switch stack.
- */
-void do_softirq_own_stack(void)
-{
-	int cpu = smp_processor_id();
-
-	WARN_ON_ONCE(!irqs_disabled());
-
-	if (on_irq_stack(current_stack_pointer, cpu)) {
-		IRQ_COUNT()++;
-		__do_softirq();
-		IRQ_COUNT()--;
-	} else {
-		__do_softirq();
-	}
-}
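
The recursion case that irq_count used to guard is now handled implicitly:
if __do_softirq() re-enables interrupts while on the irq stack, the nested
el1_irq's masked sp won't match tsk, so it keeps the current stack. A
self-contained demonstration of the masking property (hypothetical
addresses; THREAD_SIZE is 16K on arm64 at this point):

#include <assert.h>
#include <stdint.h>

#define THREAD_SIZE	16384UL

int main(void)
{
	/* Hypothetical thread_info address: base of an aligned task stack. */
	uint64_t tsk = 0xffff800000f0c000UL & ~(THREAD_SIZE - 1);
	uint64_t task_sp = tsk + 0x1000;		/* within the task stack */
	uint64_t irq_sp = 0xffff800001000000UL;		/* on the irq stack */

	assert((task_sp & ~(THREAD_SIZE - 1)) == tsk);	/* would switch */
	assert((irq_sp & ~(THREAD_SIZE - 1)) != tsk);	/* nested: no switch */
	return 0;
}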