
Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "A number of fixes for the merge window, fixing a number of cases
  missed when testing the uaccess code, particularly cases which only
  show up with certain compiler versions"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8431/1: fix alignment of __bug_table section entries
  arm/xen: Enable user access to the kernel before issuing a privcmd call
  ARM: domains: add memory dependencies to get_domain/set_domain
  ARM: domains: thread_info.h no longer needs asm/domains.h
  ARM: uaccess: fix undefined instruction on ARMv7M/noMMU
  ARM: uaccess: remove unneeded uaccess_save_and_disable macro
  ARM: swpan: fix nwfpe for uaccess changes
  ARM: 8429/1: disable GCC SRA optimization
Linus Torvalds, 10 years ago · commit 57e6bbcb4b

arch/arm/Makefile (+8 -0)

@@ -54,6 +54,14 @@ AS		+= -EL
 LD		+= -EL
 endif
 
+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS	+= $(call cc-option,-fno-ipa-sra)
+
 # This selects which instruction set is used.
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
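
An aside on what the workaround guards against: GCC PR65932 covers cases where the SRA pass, after splitting a struct into scalar temporaries, fails to sign-extend signed char and signed short members. A minimal, self-contained C sketch of the affected pattern follows; the struct and values are illustrative, not taken from the kernel. Note also that the cc-option wrapper only adds -fno-ipa-sra when the compiler accepts it, so toolchains without the flag still build.

#include <stdio.h>

/* Illustrative only: with the buggy SRA pass, loads of the signed
 * sub-word members below could be zero- rather than sign-extended
 * once the aggregate is replaced by scalar temporaries. */
struct sample {
	signed char  c;
	signed short s;
};

static int sum(struct sample v)
{
	return v.c + v.s;
}

int main(void)
{
	struct sample v = { -1, -2 };

	printf("%d\n", sum(v));	/* a correct compiler prints -3 */
	return 0;
}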

arch/arm/include/asm/assembler.h (+0 -5)

@@ -491,11 +491,6 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm
 
-	.macro	uaccess_save_and_disable, tmp
-	uaccess_save \tmp
-	uaccess_disable \tmp
-	.endm
-
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6

arch/arm/include/asm/bug.h (+1 -0)

@@ -40,6 +40,7 @@ do {								\
 		"2:\t.asciz " #__file "\n" 			\
 		".popsection\n" 				\
 		".pushsection __bug_table,\"a\"\n"		\
+		".align 2\n"					\
 		"3:\t.word 1b, 2b\n"				\
 		"\t.hword " #__line ", 0\n"			\
 		".popsection");					\

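For reference, each BUG() site emits one fixed-size record into __bug_table: two 32-bit words (address of the trapping instruction, address of the file-name string) and two halfwords (line number, padding). The table is walked as an array, so every record must start on a 4-byte boundary; on ARM, ".align 2" aligns to 2^2 = 4 bytes. A rough C view of the record, with illustrative field names (the kernel's real definition is struct bug_entry in include/asm-generic/bug.h):

#include <stdio.h>
#include <stdint.h>

/* Field names are illustrative; the layout mirrors the asm above. */
struct arm_bug_entry {
	uint32_t bug_addr;	/* .word 1b: address of the BUG insn    */
	uint32_t file;		/* .word 2b: address of the file string */
	uint16_t line;		/* .hword #__line                       */
	uint16_t pad;		/* .hword 0                             */
};

int main(void)
{
	/* 12 bytes per record; a misaligned entry would break the
	 * array walk, hence the .align. */
	printf("entry size: %zu\n", sizeof(struct arm_bug_entry));
	return 0;
}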
arch/arm/include/asm/domain.h (+4 -2)

@@ -12,6 +12,7 @@
 
 #ifndef __ASSEMBLY__
 #include <asm/barrier.h>
+#include <asm/thread_info.h>
 #endif
 
 /*
@@ -89,7 +90,8 @@ static inline unsigned int get_domain(void)
 
 	asm(
 	"mrc	p15, 0, %0, c3, c0	@ get domain"
-	 : "=r" (domain));
+	 : "=r" (domain)
+	 : "m" (current_thread_info()->cpu_domain));
 
 	return domain;
 }
@@ -98,7 +100,7 @@ static inline void set_domain(unsigned val)
 {
 	asm volatile(
 	"mcr	p15, 0, %0, c3, c0	@ set domain"
-	  : : "r" (val));
+	  : : "r" (val) : "memory");
 	isb();
 }
 
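The two constraint changes work together: the dummy "m" input makes the DACR read depend on the cpu_domain slot in thread_info, so the compiler cannot hoist the mrc above a preceding store to that field, while the "memory" clobber keeps it from moving other memory accesses across the mcr. A compile-only, ARM-only sketch of the same constraint pattern; hw_shadow is a stand-in for current_thread_info()->cpu_domain, and this is kernel-context code (cp15 accesses are privileged), shown for the constraint syntax rather than for running in userspace:

/* hw_shadow stands in for current_thread_info()->cpu_domain. */
static unsigned int hw_shadow;

static inline unsigned int read_dacr(void)
{
	unsigned int val;

	/* The "m" input adds a dependency on hw_shadow: any pending
	 * store to it must be emitted before the mrc executes. */
	asm("mrc	p15, 0, %0, c3, c0"
	    : "=r" (val)
	    : "m" (hw_shadow));
	return val;
}

static inline void write_dacr(unsigned int val)
{
	/* The "memory" clobber orders the mcr against surrounding
	 * loads and stores. */
	asm volatile("mcr	p15, 0, %0, c3, c0"
		     : : "r" (val) : "memory");
}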

arch/arm/include/asm/thread_info.h (+0 -1)

@@ -25,7 +25,6 @@
 struct task_struct;
 
 #include <asm/types.h>
-#include <asm/domain.h>
 
 typedef unsigned long mm_segment_t;
 

arch/arm/kernel/process.c (+2 -0)

@@ -226,6 +226,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 	/*
 	 * Copy the initial value of the domain access control register
 	 * from the current thread: thread->addr_limit will have been
@@ -233,6 +234,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	 * kernel/fork.c
 	 */
 	thread->cpu_domain = get_domain();
+#endif
 
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
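
The guard's effect, in miniature: when domains are in use, the child starts life with the parent's DACR value; otherwise there is no per-thread domain state to copy. A hedged, self-contained sketch, where the type, field, and the 0x55 demo value are all stand-ins rather than the kernel's own:

/* Defined here to mimic a kernel built with domain support. */
#define CONFIG_CPU_USE_DOMAINS 1

struct thread_sketch {
	unsigned int cpu_domain;	/* stands in for thread_info->cpu_domain */
};

static unsigned int current_dacr = 0x55;	/* stands in for get_domain() */

static void copy_thread_sketch(struct thread_sketch *child)
{
#ifdef CONFIG_CPU_USE_DOMAINS
	/* Child inherits the creator's domain access rights. */
	child->cpu_domain = current_dacr;
#else
	(void)child;			/* no per-thread DACR to copy */
#endif
}

int main(void)
{
	struct thread_sketch child = { 0 };

	copy_thread_sketch(&child);
	return child.cpu_domain == current_dacr ? 0 : 1;
}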

arch/arm/nwfpe/entry.S (+2 -1)

@@ -95,9 +95,10 @@ emulate:
 	reteq	r4			@ no, return failure
 
 next:
+	uaccess_enable r3
 .Lx1:	ldrt	r6, [r5], #4		@ get the next instruction and
 					@ increment PC
-
+	uaccess_disable r3
 	and	r2, r6, #0x0F000000	@ test for FP insns
 	teq	r2, #0x0C000000
 	teqne	r2, #0x0D000000
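
The shape of the change is the minimal user-access window: open it, perform exactly one user-mode load (the ldrt), close it before any further kernel work. A self-contained C sketch of that bracketing; the window functions are empty stubs standing in for the uaccess_enable/uaccess_disable macros:

#include <stdint.h>

static void uaccess_window_open(void)  { /* uaccess_enable r3  */ }
static void uaccess_window_close(void) { /* uaccess_disable r3 */ }

static uint32_t fetch_next_insn(const uint32_t *pc)
{
	uint32_t insn;

	uaccess_window_open();
	insn = *pc;		/* stands in for "ldrt r6, [r5], #4" */
	uaccess_window_close();
	return insn;
}

int main(void)
{
	uint32_t word = 0x0c000000;	/* arbitrary FP-looking insn */

	return fetch_next_insn(&word) == 0x0c000000 ? 0 : 1;
}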

arch/arm/xen/hypercall.S (+15 -0)

@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
 	mov r1, r2
 	mov r2, r3
 	ldr r3, [sp, #8]
+	/*
+	 * Privcmd calls are issued by userspace. We need to allow the
+	 * kernel to access the userspace memory before issuing the hypercall.
+	 */
+	uaccess_enable r4
+
+	/* r4 is loaded now as we used it as a scratch register above */
 	ldr r4, [sp, #4]
 	__HVC(XEN_IMM)
+
+	/*
+	 * Disable userspace access from kernel. This is fine to do
+	 * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
+	 * called before.
+	 */
+	uaccess_disable r4
+
 	ldm sp!, {r4}
 	ret lr
 ENDPROC(privcmd_call);
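
Unlike the nwfpe case above, here the kernel itself never touches user memory: Xen dereferences the userspace buffers while servicing the hypercall, so the window has to be open before the trap and stay open until it returns. A self-contained sketch of that ordering, with stub names standing in for the asm (uaccess_enable r4, __HVC(XEN_IMM), uaccess_disable r4):

static void uaccess_window_open(void)  { /* uaccess_enable r4  */ }
static void uaccess_window_close(void) { /* uaccess_disable r4 */ }
static long hvc_trap(void)             { return 0; /* __HVC(XEN_IMM) */ }

static long privcmd_call_sketch(void)
{
	long ret;

	/* The hypervisor reads and writes userspace buffers while
	 * the hypercall is in flight, so the window must already be
	 * open when the trap is taken. */
	uaccess_window_open();
	ret = hvc_trap();
	uaccess_window_close();
	return ret;
}

int main(void)
{
	return (int)privcmd_call_sketch();
}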