Browse Source

Merge branch 'arm64/exception-stack' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux into for-next/core

* 'arm64/exception-stack' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux:
  arm64: unwind: remove sp from struct stackframe
  arm64: unwind: reference pt_regs via embedded stack frame
  arm64: unwind: disregard frame.sp when validating frame pointer
  arm64: unwind: avoid percpu indirection for irq stack
  arm64: move non-entry code out of .entry.text
  arm64: consistently use bl for C exception entry
  arm64: Add ASM_BUG()
Catalin Marinas 8 years ago
parent
commit
0553896787

+ 54 - 0
arch/arm64/include/asm/asm-bug.h

@@ -0,0 +1,54 @@
+#ifndef __ASM_ASM_BUG_H
+/*
+ * Copyright (C) 2017  ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#define __ASM_ASM_BUG_H
+
+#include <asm/brk-imm.h>
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
+#define __BUGVERBOSE_LOCATION(file, line)			\
+		.pushsection .rodata.str,"aMS",@progbits,1;	\
+	2:	.string file;					\
+		.popsection;					\
+								\
+		.long 2b - 0b;					\
+		.short line;
+#else
+#define _BUGVERBOSE_LOCATION(file, line)
+#endif
+
+#ifdef CONFIG_GENERIC_BUG
+
+#define __BUG_ENTRY(flags) 				\
+		.pushsection __bug_table,"aw";		\
+		.align 2;				\
+	0:	.long 1f - 0b;				\
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
+		.short flags; 				\
+		.popsection;				\
+	1:
+#else
+#define __BUG_ENTRY(flags)
+#endif
+
+#define ASM_BUG_FLAGS(flags)				\
+	__BUG_ENTRY(flags)				\
+	brk	BUG_BRK_IMM
+
+#define ASM_BUG()	ASM_BUG_FLAGS(0)
+
+#endif /* __ASM_ASM_BUG_H */

+ 11 - 0
arch/arm64/include/asm/assembler.h

@@ -409,6 +409,17 @@ alternative_endif
 	.size	__pi_##x, . - x;	\
 	ENDPROC(x)
 
+/*
+ * Annotate a function as being unsuitable for kprobes.
+ */
+#ifdef CONFIG_KPROBES
+#define NOKPROBE(x)				\
+	.pushsection "_kprobe_blacklist", "aw";	\
+	.quad	x;				\
+	.popsection;
+#else
+#define NOKPROBE(x)
+#endif
 	/*
 	 * Emit a 64-bit absolute little endian symbol reference in a way that
 	 * ensures that it will be resolved at build time, even when building a

+ 3 - 32
arch/arm64/include/asm/bug.h

@@ -18,41 +18,12 @@
 #ifndef _ARCH_ARM64_ASM_BUG_H
 #define _ARCH_ARM64_ASM_BUG_H
 
-#include <asm/brk-imm.h>
+#include <linux/stringify.h>
 
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
-#define __BUGVERBOSE_LOCATION(file, line)				\
-		".pushsection .rodata.str,\"aMS\",@progbits,1\n"	\
-	"2:	.string \"" file "\"\n\t"				\
-		".popsection\n\t"					\
-									\
-		".long 2b - 0b\n\t"					\
-		".short " #line "\n\t"
-#else
-#define _BUGVERBOSE_LOCATION(file, line)
-#endif
-
-#ifdef CONFIG_GENERIC_BUG
-
-#define __BUG_ENTRY(flags) 				\
-		".pushsection __bug_table,\"aw\"\n\t"	\
-		".align 2\n\t"				\
-	"0:	.long 1f - 0b\n\t"			\
-_BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
-		".short " #flags "\n\t"			\
-		".popsection\n"				\
-	"1:	"
-#else
-#define __BUG_ENTRY(flags) ""
-#endif
+#include <asm/asm-bug.h>
 
 #define __BUG_FLAGS(flags)				\
-	asm volatile (					\
-		__BUG_ENTRY(flags)			\
-		"brk %[imm]" :: [imm] "i" (BUG_BRK_IMM)	\
-	);
-
+	asm volatile (__stringify(ASM_BUG_FLAGS(flags)));
 
 #define BUG() do {					\
 	__BUG_FLAGS(0);					\

+ 11 - 28
arch/arm64/include/asm/irq.h

@@ -7,6 +7,7 @@
 #ifndef __ASSEMBLER__
 
 #include <linux/percpu.h>
+#include <linux/sched/task_stack.h>
 
 #include <asm-generic/irq.h>
 #include <asm/thread_info.h>
@@ -15,31 +16,6 @@ struct pt_regs;
 
 DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
 
-/*
- * The highest address on the stack, and the first to be used. Used to
- * find the dummy-stack frame put down by el?_irq() in entry.S, which
- * is structured as follows:
- *
- *       ------------
- *       |          |  <- irq_stack_ptr
- *   top ------------
- *       |   x19    | <- irq_stack_ptr - 0x08
- *       ------------
- *       |   x29    | <- irq_stack_ptr - 0x10
- *       ------------
- *
- * where x19 holds a copy of the task stack pointer where the struct pt_regs
- * from kernel_entry can be found.
- *
- */
-#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
-
-/*
- * The offset from irq_stack_ptr where entry.S will store the original
- * stack pointer. Used by unwind_frame() and dump_backtrace().
- */
-#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
-
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 static inline int nr_legacy_irqs(void)
@@ -47,14 +23,21 @@ static inline int nr_legacy_irqs(void)
 	return 0;
 }
 
-static inline bool on_irq_stack(unsigned long sp, int cpu)
+static inline bool on_irq_stack(unsigned long sp)
 {
-	/* variable names the same as kernel/stacktrace.c */
-	unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
+	unsigned long low = (unsigned long)raw_cpu_ptr(irq_stack);
 	unsigned long high = low + IRQ_STACK_START_SP;
 
 	return (low <= sp && sp <= high);
 }
 
+static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
+{
+	unsigned long low = (unsigned long)task_stack_page(tsk);
+	unsigned long high = low + THREAD_SIZE;
+
+	return (low <= sp && sp < high);
+}
+
 #endif /* !__ASSEMBLER__ */
 #endif

+ 1 - 0
arch/arm64/include/asm/ptrace.h

@@ -137,6 +137,7 @@ struct pt_regs {
 
 	u64 orig_addr_limit;
 	u64 unused;	// maintain 16 byte alignment
+	u64 stackframe[2];
 };
 
 static inline bool in_syscall(struct pt_regs const *regs)

+ 0 - 1
arch/arm64/include/asm/stacktrace.h

@@ -20,7 +20,6 @@ struct task_struct;
 
 struct stackframe {
 	unsigned long fp;
-	unsigned long sp;
 	unsigned long pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	unsigned int graph;

+ 5 - 0
arch/arm64/include/asm/traps.h

@@ -60,4 +60,9 @@ static inline int in_exception_text(unsigned long ptr)
 	return in ? : __in_irqentry_text(ptr);
 }
 
+static inline int in_entry_text(unsigned long ptr)
+{
+	return ptr >= (unsigned long)&__entry_text_start &&
+	       ptr < (unsigned long)&__entry_text_end;
+}
 #endif

+ 1 - 0
arch/arm64/kernel/asm-offsets.c

@@ -75,6 +75,7 @@ int main(void)
   DEFINE(S_ORIG_X0,		offsetof(struct pt_regs, orig_x0));
   DEFINE(S_SYSCALLNO,		offsetof(struct pt_regs, syscallno));
   DEFINE(S_ORIG_ADDR_LIMIT,	offsetof(struct pt_regs, orig_addr_limit));
+  DEFINE(S_STACKFRAME,		offsetof(struct pt_regs, stackframe));
   DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
   BLANK();
   DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));

+ 66 - 56
arch/arm64/kernel/entry.S

@@ -111,6 +111,18 @@
 	mrs	x23, spsr_el1
 	stp	lr, x21, [sp, #S_LR]
 
+	/*
+	 * In order to be able to dump the contents of struct pt_regs at the
+	 * time the exception was taken (in case we attempt to walk the call
+	 * stack later), chain it together with the stack frames.
+	 */
+	.if \el == 0
+	stp	xzr, xzr, [sp, #S_STACKFRAME]
+	.else
+	stp	x29, x22, [sp, #S_STACKFRAME]
+	.endif
+	add	x29, sp, #S_STACKFRAME
+
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	/*
 	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
@@ -263,14 +275,6 @@ alternative_else_nop_endif
 
 	/* switch to the irq stack */
 	mov	sp, x26
-
-	/*
-	 * Add a dummy stack frame, this non-standard format is fixed up
-	 * by unwind_frame()
-	 */
-	stp     x29, x19, [sp, #-16]!
-	mov	x29, sp
-
 9998:
 	.endm
 
@@ -350,7 +354,8 @@ END(vectors)
 	mov	x0, sp
 	mov	x1, #\reason
 	mrs	x2, esr_el1
-	b	bad_mode
+	bl	bad_mode
+	ASM_BUG()
 	.endm
 
 el0_sync_invalid:
@@ -447,14 +452,16 @@ el1_sp_pc:
 	mrs	x0, far_el1
 	enable_dbg
 	mov	x2, sp
-	b	do_sp_pc_abort
+	bl	do_sp_pc_abort
+	ASM_BUG()
 el1_undef:
 	/*
 	 * Undefined instruction
 	 */
 	enable_dbg
 	mov	x0, sp
-	b	do_undefinstr
+	bl	do_undefinstr
+	ASM_BUG()
 el1_dbg:
 	/*
 	 * Debug exception handling
@@ -472,7 +479,8 @@ el1_inv:
 	mov	x0, sp
 	mov	x2, x1
 	mov	x1, #BAD_SYNC
-	b	bad_mode
+	bl	bad_mode
+	ASM_BUG()
 ENDPROC(el1_sync)
 
 	.align	6
@@ -705,38 +713,6 @@ el0_irq_naked:
 	b	ret_to_user
 ENDPROC(el0_irq)
 
-/*
- * Register switch for AArch64. The callee-saved registers need to be saved
- * and restored. On entry:
- *   x0 = previous task_struct (must be preserved across the switch)
- *   x1 = next task_struct
- * Previous and next are guaranteed not to be the same.
- *
- */
-ENTRY(cpu_switch_to)
-	mov	x10, #THREAD_CPU_CONTEXT
-	add	x8, x0, x10
-	mov	x9, sp
-	stp	x19, x20, [x8], #16		// store callee-saved registers
-	stp	x21, x22, [x8], #16
-	stp	x23, x24, [x8], #16
-	stp	x25, x26, [x8], #16
-	stp	x27, x28, [x8], #16
-	stp	x29, x9, [x8], #16
-	str	lr, [x8]
-	add	x8, x1, x10
-	ldp	x19, x20, [x8], #16		// restore callee-saved registers
-	ldp	x21, x22, [x8], #16
-	ldp	x23, x24, [x8], #16
-	ldp	x25, x26, [x8], #16
-	ldp	x27, x28, [x8], #16
-	ldp	x29, x9, [x8], #16
-	ldr	lr, [x8]
-	mov	sp, x9
-	msr	sp_el0, x1
-	ret
-ENDPROC(cpu_switch_to)
-
 /*
  * This is the fast syscall return path.  We do as little as possible here,
  * and this includes saving x0 back into the kernel stack.
@@ -779,18 +755,6 @@ finish_ret_to_user:
 	kernel_exit 0
 ENDPROC(ret_to_user)
 
-/*
- * This is how we return from a fork.
- */
-ENTRY(ret_from_fork)
-	bl	schedule_tail
-	cbz	x19, 1f				// not a kernel thread
-	mov	x0, x20
-	blr	x19
-1:	get_thread_info tsk
-	b	ret_to_user
-ENDPROC(ret_from_fork)
-
 /*
  * SVC handler.
  */
@@ -863,3 +827,49 @@ ENTRY(sys_rt_sigreturn_wrapper)
 	mov	x0, sp
 	b	sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
+
+/*
+ * Register switch for AArch64. The callee-saved registers need to be saved
+ * and restored. On entry:
+ *   x0 = previous task_struct (must be preserved across the switch)
+ *   x1 = next task_struct
+ * Previous and next are guaranteed not to be the same.
+ *
+ */
+ENTRY(cpu_switch_to)
+	mov	x10, #THREAD_CPU_CONTEXT
+	add	x8, x0, x10
+	mov	x9, sp
+	stp	x19, x20, [x8], #16		// store callee-saved registers
+	stp	x21, x22, [x8], #16
+	stp	x23, x24, [x8], #16
+	stp	x25, x26, [x8], #16
+	stp	x27, x28, [x8], #16
+	stp	x29, x9, [x8], #16
+	str	lr, [x8]
+	add	x8, x1, x10
+	ldp	x19, x20, [x8], #16		// restore callee-saved registers
+	ldp	x21, x22, [x8], #16
+	ldp	x23, x24, [x8], #16
+	ldp	x25, x26, [x8], #16
+	ldp	x27, x28, [x8], #16
+	ldp	x29, x9, [x8], #16
+	ldr	lr, [x8]
+	mov	sp, x9
+	msr	sp_el0, x1
+	ret
+ENDPROC(cpu_switch_to)
+NOKPROBE(cpu_switch_to)
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+	bl	schedule_tail
+	cbz	x19, 1f				// not a kernel thread
+	mov	x0, x20
+	blr	x19
+1:	get_thread_info tsk
+	b	ret_to_user
+ENDPROC(ret_from_fork)
+NOKPROBE(ret_from_fork)

+ 4 - 0
arch/arm64/kernel/head.S

@@ -362,6 +362,9 @@ __primary_switched:
 	ret					// to __primary_switch()
 0:
 #endif
+	add	sp, sp, #16
+	mov	x29, #0
+	mov	x30, #0
 	b	start_kernel
 ENDPROC(__primary_switched)
 
@@ -617,6 +620,7 @@ __secondary_switched:
 	ldr	x2, [x0, #CPU_BOOT_TASK]
 	msr	sp_el0, x2
 	mov	x29, #0
+	mov	x30, #0
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
 

+ 0 - 1
arch/arm64/kernel/perf_callchain.c

@@ -162,7 +162,6 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 	}
 
 	frame.fp = regs->regs[29];
-	frame.sp = regs->sp;
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = current->curr_ret_stack;

+ 1 - 4
arch/arm64/kernel/process.c

@@ -382,15 +382,12 @@ unsigned long get_wchan(struct task_struct *p)
 		return 0;
 
 	frame.fp = thread_saved_fp(p);
-	frame.sp = thread_saved_sp(p);
 	frame.pc = thread_saved_pc(p);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = p->curr_ret_stack;
 #endif
 	do {
-		if (frame.sp < stack_page ||
-		    frame.sp >= stack_page + THREAD_SIZE ||
-		    unwind_frame(p, &frame))
+		if (unwind_frame(p, &frame))
 			goto out;
 		if (!in_sched_functions(frame.pc)) {
 			ret = frame.pc;

+ 1 - 1
arch/arm64/kernel/ptrace.c

@@ -127,7 +127,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 {
 	return ((addr & ~(THREAD_SIZE - 1))  ==
 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
-		on_irq_stack(addr, raw_smp_processor_id());
+		on_irq_stack(addr);
 }
 
 /**

+ 0 - 1
arch/arm64/kernel/return_address.c

@@ -42,7 +42,6 @@ void *return_address(unsigned int level)
 	data.addr = NULL;
 
 	frame.fp = (unsigned long)__builtin_frame_address(0);
-	frame.sp = current_stack_pointer;
 	frame.pc = (unsigned long)return_address; /* dummy */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = current->curr_ret_stack;

+ 11 - 46
arch/arm64/kernel/stacktrace.c

@@ -42,9 +42,10 @@
  */
 int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
-	unsigned long high, low;
 	unsigned long fp = frame->fp;
-	unsigned long irq_stack_ptr;
+
+	if (fp & 0xf)
+		return -EINVAL;
 
 	if (!tsk)
 		tsk = current;
@@ -53,22 +54,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	 * Switching between stacks is valid when tracing current and in
 	 * non-preemptible context.
 	 */
-	if (tsk == current && !preemptible())
-		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
-	else
-		irq_stack_ptr = 0;
-
-	low  = frame->sp;
-	/* irq stacks are not THREAD_SIZE aligned */
-	if (on_irq_stack(frame->sp, raw_smp_processor_id()))
-		high = irq_stack_ptr;
-	else
-		high = ALIGN(low, THREAD_SIZE) - 0x20;
-
-	if (fp < low || fp > high || fp & 0xf)
+	if (!(tsk == current && !preemptible() && on_irq_stack(fp)) &&
+	    !on_task_stack(tsk, fp))
 		return -EINVAL;
 
-	frame->sp = fp + 0x10;
 	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
 	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
 
@@ -86,34 +75,13 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 	/*
-	 * Check whether we are going to walk through from interrupt stack
-	 * to task stack.
-	 * If we reach the end of the stack - and its an interrupt stack,
-	 * unpack the dummy frame to find the original elr.
-	 *
-	 * Check the frame->fp we read from the bottom of the irq_stack,
-	 * and the original task stack pointer are both in current->stack.
+	 * Frames created upon entry from EL0 have NULL FP and PC values, so
+	 * don't bother reporting these. Frames created by __noreturn functions
+	 * might have a valid FP even if PC is bogus, so only terminate where
+	 * both are NULL.
 	 */
-	if (frame->sp == irq_stack_ptr) {
-		struct pt_regs *irq_args;
-		unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
-
-		if (object_is_on_stack((void *)orig_sp) &&
-		   object_is_on_stack((void *)frame->fp)) {
-			frame->sp = orig_sp;
-
-			/* orig_sp is the saved pt_regs, find the elr */
-			irq_args = (struct pt_regs *)orig_sp;
-			frame->pc = irq_args->pc;
-		} else {
-			/*
-			 * This frame has a non-standard format, and we
-			 * didn't fix it, because the data looked wrong.
-			 * Refuse to output this frame.
-			 */
-			return -EINVAL;
-		}
-	}
+	if (!frame->fp && !frame->pc)
+		return -EINVAL;
 
 	return 0;
 }
@@ -167,7 +135,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 	data.no_sched_functions = 0;
 
 	frame.fp = regs->regs[29];
-	frame.sp = regs->sp;
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = current->curr_ret_stack;
@@ -192,12 +159,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	if (tsk != current) {
 		data.no_sched_functions = 1;
 		frame.fp = thread_saved_fp(tsk);
-		frame.sp = thread_saved_sp(tsk);
 		frame.pc = thread_saved_pc(tsk);
 	} else {
 		data.no_sched_functions = 0;
 		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.sp = current_stack_pointer;
 		frame.pc = (unsigned long)save_stack_trace_tsk;
 	}
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER

+ 0 - 1
arch/arm64/kernel/time.c

@@ -50,7 +50,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 		return regs->pc;
 
 	frame.fp = regs->regs[29];
-	frame.sp = regs->sp;
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = -1; /* no task info */

+ 7 - 27
arch/arm64/kernel/traps.c

@@ -143,7 +143,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	unsigned long irq_stack_ptr;
 	int skip;
 
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -154,25 +153,14 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	if (!try_get_task_stack(tsk))
 		return;
 
-	/*
-	 * Switching between stacks is valid when tracing current and in
-	 * non-preemptible context.
-	 */
-	if (tsk == current && !preemptible())
-		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
-	else
-		irq_stack_ptr = 0;
-
 	if (tsk == current) {
 		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.sp = current_stack_pointer;
 		frame.pc = (unsigned long)dump_backtrace;
 	} else {
 		/*
 		 * task blocked in __switch_to
 		 */
 		frame.fp = thread_saved_fp(tsk);
-		frame.sp = thread_saved_sp(tsk);
 		frame.pc = thread_saved_pc(tsk);
 	}
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -182,13 +170,12 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	skip = !!regs;
 	printk("Call trace:\n");
 	while (1) {
-		unsigned long where = frame.pc;
 		unsigned long stack;
 		int ret;
 
 		/* skip until specified stack frame */
 		if (!skip) {
-			dump_backtrace_entry(where);
+			dump_backtrace_entry(frame.pc);
 		} else if (frame.fp == regs->regs[29]) {
 			skip = 0;
 			/*
@@ -203,20 +190,13 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		ret = unwind_frame(tsk, &frame);
 		if (ret < 0)
 			break;
-		stack = frame.sp;
-		if (in_exception_text(where)) {
-			/*
-			 * If we switched to the irq_stack before calling this
-			 * exception handler, then the pt_regs will be on the
-			 * task stack. The easiest way to tell is if the large
-			 * pt_regs would overlap with the end of the irq_stack.
-			 */
-			if (stack < irq_stack_ptr &&
-			    (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
-				stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+		if (in_entry_text(frame.pc)) {
+			stack = frame.fp - offsetof(struct pt_regs, stackframe);
 
-			dump_mem("", "Exception stack", stack,
-				 stack + sizeof(struct pt_regs));
+			if (on_task_stack(tsk, stack) ||
+			    (tsk == current && !preemptible() && on_irq_stack(stack)))
+				dump_mem("", "Exception stack", stack,
+					 stack + sizeof(struct pt_regs));
 		}
 	}