
Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "Various fixes and post-merge window updates.  Included here are:
   - ensure Kconfig things which should be sorted remain sorted
   - fix three big-endian bugs which crept in during the last merge
     window
   - add the renameat2 syscall
   - fix big.LITTLE switcher initialisation checks
   - fix kdump vmcore for LPAE kernels"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: add renameat2 syscall
  ARM: keep arch/arm/Kconfig and arch/arm/mm/Kconfig select entries sorted
  ARM: 8033/1: fix big endian __pv_phys_pfn_offset size related issue
  ARM: 8032/1: bL_switcher: fix validation check before its activation
  ARM: 8030/1: ARM : kdump : add arch_crash_save_vmcoreinfo
  ARM: 8027/1: fix do_div() bug in big-endian systems
  ARM: 8026/1: Fix emulation of multiply accumulate instructions
  ARM: 8024/1: Keep DEBUG_UART_{PHYS,VIRT} entries sorted
Linus Torvalds committed 11 years ago · commit db725c88c7

+ 5 - 5
arch/arm/Kconfig

@@ -30,9 +30,9 @@ config ARM
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_BPF_JIT
+	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_C_RECORDMCOUNT
-	select HAVE_CC_STACKPROTECTOR
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
@@ -422,8 +422,8 @@ config ARCH_EFM32
 	bool "Energy Micro efm32"
 	depends on !MMU
 	select ARCH_REQUIRE_GPIOLIB
-	select AUTO_ZRELADDR
 	select ARM_NVIC
+	select AUTO_ZRELADDR
 	select CLKSRC_OF
 	select COMMON_CLK
 	select CPU_V7M
@@ -511,8 +511,8 @@ config ARCH_IXP4XX
 	bool "IXP4xx-based"
 	depends on MMU
 	select ARCH_HAS_DMA_SET_COHERENT_MASK
-	select ARCH_SUPPORTS_BIG_ENDIAN
 	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_SUPPORTS_BIG_ENDIAN
 	select CLKSRC_MMIO
 	select CPU_XSCALE
 	select DMABOUNCE if PCI
@@ -1575,8 +1575,8 @@ config BIG_LITTLE
 config BL_SWITCHER
 	bool "big.LITTLE switcher support"
 	depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
-	select CPU_PM
 	select ARM_CPU_SUSPEND
+	select CPU_PM
 	help
 	  The big.LITTLE "switcher" provides the core functionality to
 	  transparently handle transition between a cluster of A15's
@@ -1920,9 +1920,9 @@ config XEN
 	depends on CPU_V7 && !CPU_V6
 	depends on !GENERIC_ATOMIC64
 	depends on MMU
+	select ARCH_DMA_ADDR_T_64BIT
 	select ARM_PSCI
 	select SWIOTLB_XEN
-	select ARCH_DMA_ADDR_T_64BIT
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 

+ 6 - 6
arch/arm/Kconfig.debug

@@ -1030,9 +1030,9 @@ config DEBUG_UART_PHYS
 	default 0x40100000 if DEBUG_PXA_UART1
 	default 0x42000000 if ARCH_GEMINI
 	default 0x7c0003f8 if FOOTBRIDGE
-	default 0x80230000 if DEBUG_PICOXCELL_UART
 	default 0x80070000 if DEBUG_IMX23_UART
 	default 0x80074000 if DEBUG_IMX28_UART
+	default 0x80230000 if DEBUG_PICOXCELL_UART
 	default 0x808c0000 if ARCH_EP93XX
 	default 0x90020000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
 	default 0xb0090000 if DEBUG_VEXPRESS_UART0_CRX
@@ -1096,22 +1096,22 @@ config DEBUG_UART_VIRT
 	default 0xfeb26000 if DEBUG_RK3X_UART1
 	default 0xfeb30c00 if DEBUG_KEYSTONE_UART0
 	default 0xfeb31000 if DEBUG_KEYSTONE_UART1
-	default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE
-	default 0xfed60000 if DEBUG_RK29_UART0
-	default 0xfed64000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
-	default 0xfed68000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
 	default 0xfec02000 if DEBUG_SOCFPGA_UART
+	default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE
 	default 0xfec20000 if DEBUG_DAVINCI_DMx_UART0
 	default 0xfed0c000 if DEBUG_DAVINCI_DA8XX_UART1
 	default 0xfed0d000 if DEBUG_DAVINCI_DA8XX_UART2
 	default 0xfed12000 if ARCH_KIRKWOOD
+	default 0xfed60000 if DEBUG_RK29_UART0
+	default 0xfed64000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
+	default 0xfed68000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
 	default 0xfedc0000 if ARCH_EP93XX
 	default 0xfee003f8 if FOOTBRIDGE
 	default 0xfee20000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
-	default 0xfef36000 if DEBUG_HIGHBANK_UART
 	default 0xfee82340 if ARCH_IOP13XX
 	default 0xfef00000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
 	default 0xfef00003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
+	default 0xfef36000 if DEBUG_HIGHBANK_UART
 	default 0xfefff700 if ARCH_IOP33X
 	default 0xff003000 if DEBUG_U300_UART
 	default DEBUG_UART_PHYS if !MMU

+ 2 - 4
arch/arm/common/bL_switcher.c

@@ -797,10 +797,8 @@ static int __init bL_switcher_init(void)
 {
 	int ret;
 
-	if (MAX_NR_CLUSTERS != 2) {
-		pr_err("%s: only dual cluster systems are supported\n", __func__);
-		return -EINVAL;
-	}
+	if (!mcpm_is_available())
+		return -ENODEV;
 
 	cpu_notifier(bL_switcher_hotplug_callback, 0);
 

+ 5 - 0
arch/arm/common/mcpm_entry.c

@@ -48,6 +48,11 @@ int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
 	return 0;
 }
 
+bool mcpm_is_available(void)
+{
+	return (platform_ops) ? true : false;
+}
+
 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 {
 	if (!platform_ops)
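
mcpm_is_available() simply reports whether a platform back-end has been registered through mcpm_platform_register(). For context, a minimal, hypothetical registration sketch (the my_soc_* names are illustrative and not part of this commit; the .power_up/.power_down fields follow the mcpm_platform_ops layout of this kernel generation):

/* Hypothetical platform code, not from this commit: shows the point at
 * which mcpm_is_available() starts returning true. */
#include <linux/init.h>
#include <asm/mcpm.h>

static int my_soc_power_up(unsigned int cpu, unsigned int cluster)
{
	/* platform specific: switch on the CPU/cluster rails, release reset */
	return 0;
}

static void my_soc_power_down(void)
{
	/* platform specific: prepare the calling CPU to be powered off */
}

static const struct mcpm_platform_ops my_soc_power_ops = {
	.power_up	= my_soc_power_up,
	.power_down	= my_soc_power_down,
};

static int __init my_soc_mcpm_init(void)
{
	/* Once registration succeeds, mcpm_is_available() returns true and
	 * late consumers such as bL_switcher_init() may proceed. */
	return mcpm_platform_register(&my_soc_power_ops);
}
early_initcall(my_soc_mcpm_init);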

+ 1 - 1
arch/arm/include/asm/div64.h

@@ -156,7 +156,7 @@
 		/* Select the best insn combination to perform the   */	\
 		/* actual __m * __n / (__p << 64) operation.         */	\
 		if (!__c) {						\
-			asm (	"umull	%Q0, %R0, %1, %Q2\n\t"		\
+			asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"		\
 				"mov	%Q0, #0"			\
 				: "=&r" (__res)				\
 				: "r" (__m), "r" (__n)			\
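
The one-character change matters on big-endian: for a 64-bit operand held in a register pair, plain %1 names the first register of the pair, which in big-endian mode carries the most significant word, so the umull consumed the wrong half of __m; %Q1 explicitly names the least significant word on either endianness. For context, a hedged sketch of the kind of call site this constant-divisor path serves (values are illustrative):

	/* do_div() divides a 64-bit dividend by a 32-bit divisor in place and
	 * returns the remainder; with a compile-time-constant divisor it may
	 * take the reciprocal-multiply (umull) path patched above. */
	u64 ns = 1000000123ULL;
	u32 rem = do_div(ns, 1000000);	/* ns == 1000, rem == 123 */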

+ 7 - 0
arch/arm/include/asm/mcpm.h

@@ -53,6 +53,13 @@ void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
  * CPU/cluster power operations API for higher subsystems to use.
  */
 
+/**
+ * mcpm_is_available - returns whether MCPM is initialized and available
+ *
+ * This returns true or false accordingly.
+ */
+bool mcpm_is_available(void);
+
 /**
  * mcpm_cpu_power_up - make given CPU in given cluster runable
  *

+ 1 - 0
arch/arm/include/uapi/asm/unistd.h

@@ -408,6 +408,7 @@
 #define __NR_finit_module		(__NR_SYSCALL_BASE+379)
 #define __NR_sched_setattr		(__NR_SYSCALL_BASE+380)
 #define __NR_sched_getattr		(__NR_SYSCALL_BASE+381)
+#define __NR_renameat2			(__NR_SYSCALL_BASE+382)
 
 /*
  * This may need to be greater than __NR_last_syscall+1 in order to

+ 1 - 0
arch/arm/kernel/calls.S

@@ -391,6 +391,7 @@
 		CALL(sys_finit_module)
 /* 380 */	CALL(sys_sched_setattr)
 		CALL(sys_sched_getattr)
+		CALL(sys_renameat2)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
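
With the number reserved in unistd.h and the table entry added above, the syscall is reachable from userspace. A hedged sketch of calling it via syscall(2), since the C libraries of this era have no renameat2() wrapper (the fallback number 382 assumes EABI, where __NR_SYSCALL_BASE is 0; the file names are illustrative):

/* Userspace sketch, not part of this commit. */
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_renameat2
#define __NR_renameat2 382
#endif

#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE (1 << 0)	/* fail with EEXIST instead of replacing */
#endif

int main(void)
{
	long ret = syscall(__NR_renameat2, AT_FDCWD, "old.txt",
			   AT_FDCWD, "new.txt", RENAME_NOREPLACE);

	if (ret == -1) {
		perror("renameat2");
		return 1;
	}
	return 0;
}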

+ 1 - 1
arch/arm/kernel/head.S

@@ -587,7 +587,7 @@ __fixup_pv_table:
 	add	r6, r6, r3	@ adjust __pv_phys_pfn_offset address
 	add	r7, r7, r3	@ adjust __pv_offset address
 	mov	r0, r8, lsr #12	@ convert to PFN
-	str	r0, [r6, #LOW_OFFSET]	@ save computed PHYS_OFFSET to __pv_phys_pfn_offset
+	str	r0, [r6]	@ save computed PHYS_OFFSET to __pv_phys_pfn_offset
 	strcc	ip, [r7, #HIGH_OFFSET]	@ save to __pv_offset high bits
 	mov	r6, r3, lsr #24	@ constant for add/sub instructions
 	teq	r3, r6, lsl #24 @ must be 16MiB aligned

+ 7 - 0
arch/arm/kernel/machine_kexec.c

@@ -184,3 +184,10 @@ void machine_kexec(struct kimage *image)
 
 	soft_restart(reboot_entry_phys);
 }
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_ARM_LPAE
+	VMCOREINFO_CONFIG(ARM_LPAE);
+#endif
+}
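
For reference, VMCOREINFO_CONFIG() records a kernel configuration symbol in the vmcoreinfo ELF note that dump tools such as makedumpfile read out of the crash capture. A hedged sketch of what the line above amounts to, going by the generic macro in include/linux/kexec.h of this era:

	/* VMCOREINFO_CONFIG(ARM_LPAE) effectively appends: */
	vmcoreinfo_append_str("CONFIG_%s=y\n", "ARM_LPAE");
	/* so the dump carries "CONFIG_ARM_LPAE=y" and the parser knows the
	 * kernel used 64-bit LPAE page table descriptors. */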

+ 4 - 4
arch/arm/mm/Kconfig

@@ -420,29 +420,29 @@ config CPU_32v3
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-	select TLS_REG_EMUL if SMP || !MMU
 	select NEED_KUSER_HELPERS
+	select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-	select TLS_REG_EMUL if SMP || !MMU
 	select NEED_KUSER_HELPERS
+	select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-	select TLS_REG_EMUL if SMP || !MMU
 	select NEED_KUSER_HELPERS
+	select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-	select TLS_REG_EMUL if SMP || !MMU
 	select NEED_KUSER_HELPERS
+	select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v6
 	bool

+ 2 - 0
arch/arm/vfp/vfpdouble.c

@@ -866,6 +866,8 @@ vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, ch
 		vdp.sign = vfp_sign_negate(vdp.sign);
 
 	vfp_double_unpack(&vdn, vfp_get_double(dd));
+	if (vdn.exponent == 0 && vdn.significand)
+		vfp_double_normalise_denormal(&vdn);
 	if (negate & NEG_SUBTRACT)
 		vdn.sign = vfp_sign_negate(vdn.sign);
 

+ 2 - 0
arch/arm/vfp/vfpsingle.c

@@ -915,6 +915,8 @@ vfp_single_multiply_accumulate(int sd, int sn, s32 m, u32 fpscr, u32 negate, cha
 	v = vfp_get_float(sd);
 	pr_debug("VFP: s%u = %08x\n", sd, v);
 	vfp_single_unpack(&vsn, v);
+	if (vsn.exponent == 0 && vsn.significand)
+		vfp_single_normalise_denormal(&vsn);
 	if (negate & NEG_SUBTRACT)
 		vsn.sign = vfp_sign_negate(vsn.sign);