
Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "Low priority fixes and updates for ARM:

   - add some missing includes

   - efficiency improvements in system call entry code when tracing is
     enabled

   - ensure ARMv6+ is always built as EABI

   - export save_stack_trace_tsk()

   - fix fatal signal handling during mm fault

   - build translation table base address register from scratch

   - appropriately align the .data section to a word boundary where we
     rely on that data being word aligned"

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8691/1: Export save_stack_trace_tsk()
  ARM: 8692/1: mm: abort uaccess retries upon fatal signal
  ARM: 8690/1: lpae: build TTB control register value from scratch in v7_ttb_setup
  ARM: align .data section
  ARM: always enable AEABI for ARMv6+
  ARM: avoid saving and restoring registers unnecessarily
  ARM: move PC value into r9
  ARM: obtain thread info structure later
  ARM: use aliases for registers in entry-common
  ARM: 8689/1: scu: add missing errno include
  ARM: 8688/1: pm: add missing types include
Linus Torvalds 8 years ago
parent
commit
8fac2f96ab
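
Background for the repeated ".align 2" additions in the diffs below (an illustrative sketch, not part of this merge; the labels are hypothetical): GNU as does not realign the location counter when a .data fragment starts emitting words, so, for example, an earlier odd-sized object can leave a later .word off a word boundary. On ARM, .align takes a power of two, so .align 2 pads to a 4-byte boundary:

	.data
hypothetical_flag:
	.byte	1			@ odd-sized object; location counter is now misaligned
	.align	2			@ pad to a 2^2 = 4 byte boundary
hypothetical_word:
	.word	0x12345678		@ now safe for word-sized loads and stores

This is the pattern the assembly changes in this series apply: an explicit .align 2 before word-sized data in .data, so code that reads it can rely on word alignment, as the pull message notes.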

+ 2 - 2
arch/arm/Kconfig

@@ -1531,7 +1531,6 @@ config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
 	depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K
 	default y if CPU_THUMBONLY
-	select AEABI
 	select ARM_ASM_UNIFIED
 	select ARM_UNWIND
 	help
@@ -1594,7 +1593,8 @@ config ARM_PATCH_IDIV
 	  code to do integer division.
 
 config AEABI
-	bool "Use the ARM EABI to compile the kernel"
+	bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
+	default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
 	help
 	  This option allows for the kernel to be compiled using the latest
 	  ARM ABI (aka EABI).  This is only useful if you are using a user

+ 1 - 0
arch/arm/include/asm/smp_scu.h

@@ -7,6 +7,7 @@
 
 #ifndef __ASSEMBLER__
 
+#include <linux/errno.h>
 #include <asm/cputype.h>
 
 static inline bool scu_a9_has_base(void)

+ 2 - 0
arch/arm/include/asm/suspend.h

@@ -1,6 +1,8 @@
 #ifndef __ASM_ARM_SUSPEND_H
 #define __ASM_ARM_SUSPEND_H
 
+#include <linux/types.h>
+
 struct sleep_save_sp {
 	u32 *save_ptr_stash;
 	u32 save_ptr_stash_phys;

+ 1 - 0
arch/arm/include/debug/omap2plus.S

@@ -22,6 +22,7 @@
 #define UART_OFFSET(addr)	((addr) & 0x00ffffff)
 
 		.pushsection .data
+		.align	2
 omap_uart_phys:	.word	0
 omap_uart_virt:	.word	0
 omap_uart_lsr:	.word	0

+ 2 - 0
arch/arm/kernel/entry-armv.S

@@ -721,6 +721,7 @@ do_fpe:
  */
 
 	.pushsection .data
+	.align	2
 ENTRY(fp_enter)
 	.word	no_fp
 	.popsection
@@ -1224,6 +1225,7 @@ vector_addrexcptn:
 	W(b)	vector_fiq
 
 	.data
+	.align	2
 
 	.globl	cr_alignment
 cr_alignment:

+ 31 - 13
arch/arm/kernel/entry-common.S

@@ -27,6 +27,14 @@
 
 #include "entry-header.S"
 
+saved_psr	.req	r8
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+saved_pc	.req	r9
+#define TRACE(x...) x
+#else
+saved_pc	.req	lr
+#define TRACE(x...)
+#endif
 
 	.align	5
 #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
@@ -146,16 +154,17 @@ ENTRY(vector_swi)
  ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
  THUMB(	mov	r8, sp			)
  THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
-	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
-	str	lr, [sp, #S_PC]			@ Save calling PC
-	str	r8, [sp, #S_PSR]		@ Save CPSR
+	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
+ TRACE(	mov	saved_pc, lr		)
+	str	saved_pc, [sp, #S_PC]		@ Save calling PC
+	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
 	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
 #endif
 	zero_fp
 	alignment_trap r10, ip, __cr_alignment
-	enable_irq
-	ct_user_exit
-	get_thread_info tsk
+	asm_trace_hardirqs_on save=0
+	enable_irq_notrace
+	ct_user_exit save=0
 
 	/*
 	 * Get the system call number.
@@ -168,11 +177,11 @@ ENTRY(vector_swi)
 	 * value to determine if it is an EABI or an old ABI call.
 	 */
 #ifdef CONFIG_ARM_THUMB
-	tst	r8, #PSR_T_BIT
+	tst	saved_psr, #PSR_T_BIT
 	movne	r10, #0				@ no thumb OABI emulation
- USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
+ USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
 #else
- USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
+ USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
 #endif
  ARM_BE8(rev	r10, r10)			@ little endian instruction
 
@@ -183,15 +192,17 @@ ENTRY(vector_swi)
 	 */
 #elif defined(CONFIG_ARM_THUMB)
 	/* Legacy ABI only, possibly thumb mode. */
-	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
+	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
 	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
- USER(	ldreq	scno, [lr, #-4]		)
+ USER(	ldreq	scno, [saved_pc, #-4]	)
 
 #else
 	/* Legacy ABI only. */
- USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
+ USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
 #endif
 
+	/* saved_psr and saved_pc are now dead */
+
 	uaccess_disable tbl
 
 	adr	tbl, sys_call_table		@ load syscall table pointer
@@ -210,6 +221,12 @@ ENTRY(vector_swi)
 	bic	scno, scno, #0xff000000		@ mask off SWI op-code
 	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 #endif
+	get_thread_info tsk
+	/*
+	 * Reload the registers that may have been corrupted on entry to
+	 * the syscall assembly (by tracing or context tracking.)
+	 */
+ TRACE(	ldmia	sp, {r0 - r3}		)
 
 local_restart:
 	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
@@ -239,8 +256,9 @@ local_restart:
 	 * current task.
 	 */
 9001:
-	sub	lr, lr, #4
+	sub	lr, saved_pc, #4
 	str	lr, [sp, #S_PC]
+	get_thread_info tsk
 	b	ret_fast_syscall
 #endif
 ENDPROC(vector_swi)

+ 2 - 0
arch/arm/kernel/head.S

@@ -556,6 +556,7 @@ ENDPROC(__fixup_smp)
 	.word	__smpalt_end
 
 	.pushsection .data
+	.align	2
 	.globl	smp_on_up
 smp_on_up:
 	ALT_SMP(.long	1)
@@ -716,6 +717,7 @@ ENTRY(fixup_pv_table)
 ENDPROC(fixup_pv_table)
 
 	.data
+	.align	2
 	.globl	__pv_phys_pfn_offset
 	.type	__pv_phys_pfn_offset, %object
 __pv_phys_pfn_offset:

+ 1 - 0
arch/arm/kernel/hyp-stub.S

@@ -31,6 +31,7 @@
  * zeroing of .bss would clobber it.
  */
 .data
+	.align	2
 ENTRY(__boot_cpu_mode)
 	.long	0
 .text

+ 1 - 0
arch/arm/kernel/iwmmxt.S

@@ -367,6 +367,7 @@ ENTRY(iwmmxt_task_release)
 ENDPROC(iwmmxt_task_release)
 
 	.data
+	.align	2
 concan_owner:
 	.word	0
 

+ 1 - 0
arch/arm/kernel/sleep.S

@@ -171,6 +171,7 @@ mpidr_hash_ptr:
 	.long	mpidr_hash - .			@ mpidr_hash struct offset
 
 	.data
+	.align	2
 	.type	sleep_save_sp, #object
 ENTRY(sleep_save_sp)
 	.space	SLEEP_SAVE_SP_SZ		@ struct sleep_save_sp

+ 1 - 0
arch/arm/kernel/stacktrace.c

@@ -171,6 +171,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 	__save_stack_trace(tsk, trace, 1);
 }
+EXPORT_SYMBOL(save_stack_trace_tsk);
 
 void save_stack_trace(struct stack_trace *trace)
 {

+ 1 - 0
arch/arm/mach-exynos/sleep.S

@@ -124,6 +124,7 @@ _cp15_save_diag:
 #endif /* CONFIG_CACHE_L2X0 */
 
 	.data
+	.align	2
 	.globl cp15_save_diag
 cp15_save_diag:
 	.long	0	@ cp15 diagnostic

+ 2 - 0
arch/arm/mach-omap2/sleep34xx.S

@@ -530,10 +530,12 @@ l2dis_3630_offset:
 	.long	l2dis_3630 - .
 
 	.data
+	.align	2
 l2dis_3630:
 	.word	0
 
 	.data
+	.align	2
 l2_inv_api_params:
 	.word	0x1, 0x00
 

+ 1 - 0
arch/arm/mach-omap2/sleep44xx.S

@@ -385,6 +385,7 @@ ppa_zero_params_offset:
 ENDPROC(omap_do_wfi)
 
 	.data
+	.align	2
 ppa_zero_params:
 	.word		0
 

+ 2 - 0
arch/arm/mach-pxa/mioa701_bootresume.S

@@ -16,6 +16,7 @@
  *       insist on it to be truly read-only.
  */
 	.data
+	.align	2
 ENTRY(mioa701_bootstrap)
 0:
 	b	1f
@@ -34,4 +35,5 @@ ENTRY(mioa701_jumpaddr)
 
 ENTRY(mioa701_bootstrap_lg)
 	.data
+	.align	2
 	.word	2b-0b

+ 1 - 1
arch/arm/mach-rockchip/sleep.S

@@ -23,7 +23,7 @@
  * ddr to sram for system resumeing.
  * so it is ".data section".
  */
-.align
+	.align	2
 
 ENTRY(rockchip_slp_cpu_resume)
 	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set svc, irqs off

+ 1 - 0
arch/arm/mm/cache-v4wb.S

@@ -47,6 +47,7 @@
 #define CACHE_DLIMIT	(CACHE_DSIZE * 4)
 
 	.data
+	.align	2
 flush_base:
 	.long	FLUSH_BASE
 	.text

+ 4 - 1
arch/arm/mm/fault.c

@@ -315,8 +315,11 @@ retry:
 	 * signal first. We do not need to release the mmap_sem because
 	 * it would already be released in __lock_page_or_retry in
 	 * mm/filemap.c. */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return 0;
+	}
 
 	/*
 	 * Major/minor page fault accounting is only done on the

+ 1 - 2
arch/arm/mm/proc-v7-3level.S

@@ -129,8 +129,7 @@ ENDPROC(cpu_v7_set_pte_ext)
 	.macro	v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
 	ldr	\tmp, =swapper_pg_dir		@ swapper_pg_dir virtual address
 	cmp	\ttbr1, \tmp, lsr #12		@ PHYS_OFFSET > PAGE_OFFSET?
-	mrc	p15, 0, \tmp, c2, c0, 2		@ TTB control egister
-	orr	\tmp, \tmp, #TTB_EAE
+	mov	\tmp, #TTB_EAE			@ for TTB control egister
 	ALT_SMP(orr	\tmp, \tmp, #TTB_FLAGS_SMP)
 	ALT_UP(orr	\tmp, \tmp, #TTB_FLAGS_UP)
 	ALT_SMP(orr	\tmp, \tmp, #TTB_FLAGS_SMP << 16)

+ 1 - 0
arch/arm/mm/proc-xscale.S

@@ -104,6 +104,7 @@
 	.endm
 
 	.data
+	.align	2
 clean_addr:	.word	CLEAN_ADDR
 
 	.text