
Merge branches 'misc' and 'misc-rc6' into for-linus

Russell King, 9 years ago
parent commit 598bcc6ea6

+ 9 - 4
Documentation/devicetree/bindings/arm/l2c2x0.txt

@@ -68,12 +68,17 @@ Optional properties:
   disable if zero.
 - arm,prefetch-offset : Override prefetch offset value. Valid values are
   0-7, 15, 23, and 31.
-- arm,shared-override : The default behavior of the pl310 cache controller with
-  respect to the shareable attribute is to transform "normal memory
-  non-cacheable transactions" into "cacheable no allocate" (for reads) or
-  "write through no write allocate" (for writes).
+- arm,shared-override : The default behavior of the L220 or PL310 cache
+  controllers with respect to the shareable attribute is to transform "normal
+  memory non-cacheable transactions" into "cacheable no allocate" (for reads)
+  or "write through no write allocate" (for writes).
   On systems where this may cause DMA buffer corruption, this property must be
   specified to indicate that such transforms are precluded.
+- arm,parity-enable : enable parity checking on the L2 cache (L220 or PL310).
+- arm,parity-disable : disable parity checking on the L2 cache (L220 or PL310).
+- arm,outer-sync-disable : disable the outer sync operation on the L2 cache.
+  Some core tiles, especially ARM PB11MPCore have a faulty L220 cache that
+  will randomly hang unless outer sync operations are disabled.
 - prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
   (forcibly enable), property absent (retain settings set by firmware)
 - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),

+ 25 - 5
arch/arm/Kconfig

@@ -33,10 +33,11 @@ config ARM
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
-	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
+	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_ARM_SMCCC if CPU_V7
 	select HAVE_BPF_JIT
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CONTEXT_TRACKING
@@ -45,7 +46,7 @@ config ARM
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS if MMU
-	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32
+	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
 	select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
@@ -799,6 +800,7 @@ config ARCH_VIRT
 	bool "Dummy Virtual Machine" if ARCH_MULTI_V7
 	select ARM_AMBA
 	select ARM_GIC
+	select ARM_GIC_V2M if PCI_MSI
 	select ARM_GIC_V3
 	select ARM_PSCI
 	select HAVE_ARM_ARCH_TIMER
@@ -1422,7 +1424,7 @@ config BIG_LITTLE

 config BL_SWITCHER
 	bool "big.LITTLE switcher support"
-	depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+	depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
 	select ARM_CPU_SUSPEND
 	select CPU_PM
 	help
@@ -1481,7 +1483,7 @@ config HOTPLUG_CPU

 config ARM_PSCI
 	bool "Support for the ARM Power State Coordination Interface (PSCI)"
-	depends on CPU_V7
+	depends on HAVE_ARM_SMCCC
 	select ARM_PSCI_FW
 	help
 	  Say Y here if you want Linux to communicate with system firmware
@@ -1604,6 +1606,24 @@ config THUMB2_AVOID_R_ARM_THM_JUMP11
 config ARM_ASM_UNIFIED
 	bool

+config ARM_PATCH_IDIV
+	bool "Runtime patch udiv/sdiv instructions into __aeabi_{u}idiv()"
+	depends on CPU_32v7 && !XIP_KERNEL
+	default y
+	help
+	  The ARM compiler inserts calls to __aeabi_idiv() and
+	  __aeabi_uidiv() when it needs to perform division on signed
+	  and unsigned integers. Some v7 CPUs have support for the sdiv
+	  and udiv instructions that can be used to implement those
+	  functions.
+
+	  Enabling this option allows the kernel to modify itself to
+	  replace the first two instructions of these library functions
+	  with the sdiv or udiv plus "bx lr" instructions when the CPU
+	  it is running on supports them. Typically this will be faster
+	  and less power intensive than running the original library
+	  code to do integer division.
+
 config AEABI
 	bool "Use the ARM EABI to compile the kernel"
 	help

+ 1 - 4
arch/arm/include/asm/bug.h

@@ -5,8 +5,6 @@
 #include <linux/types.h>
 #include <asm/opcodes.h>

-#ifdef CONFIG_BUG
-
 /*
  * Use a suitable undefined instruction to use for ARM/Thumb2 bug handling.
  * We need to be careful not to conflict with those used by other modules and
@@ -47,7 +45,7 @@ do {								\
 	unreachable();						\
 } while (0)

-#else  /* not CONFIG_DEBUG_BUGVERBOSE */
+#else
 #define __BUG(__file, __line, __value)				\
 do {								\
@@ -57,7 +55,6 @@ do {								\
 #endif  /* CONFIG_DEBUG_BUGVERBOSE */

 #define HAVE_ARCH_BUG
-#endif  /* CONFIG_BUG */

 #include <asm-generic/bug.h>

+ 1 - 1
arch/arm/include/asm/cpuidle.h

@@ -30,7 +30,7 @@ static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 struct device_node;

 struct cpuidle_ops {
-	int (*suspend)(int cpu, unsigned long arg);
+	int (*suspend)(unsigned long arg);
 	int (*init)(struct device_node *, int cpu);
 };

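The suspend callback loses its cpu parameter because it always runs on the CPU that is entering idle. A minimal sketch of a driver-side implementation against the new prototype (all names here are hypothetical, not from this commit):

	#include <asm/cpuidle.h>

	/* Runs on the CPU entering idle, so per-cpu state can be read with
	 * __this_cpu_read() instead of the dropped 'cpu' argument. */
	static int example_suspend(unsigned long index)
	{
		/* enter the idle state selected by 'index'; 0 on success */
		return 0;
	}

	static int example_init(struct device_node *np, int cpu)
	{
		/* probe per-cpu resources described by the DT node */
		return 0;
	}

	static const struct cpuidle_ops example_cpuidle_ops = {
		.suspend = example_suspend,
		.init	 = example_init,
	};
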
+ 1 - 1
arch/arm/include/asm/hardirq.h

@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>

-#define NR_IPI	8
+#define NR_IPI	7

 typedef struct {
 	unsigned int __softirq_pending;

+ 1 - 1
arch/arm/include/asm/psci.h

@@ -16,7 +16,7 @@

 extern struct smp_operations psci_smp_ops;

-#ifdef CONFIG_ARM_PSCI
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
 bool psci_smp_available(void);
 #else
 static inline bool psci_smp_available(void) { return false; }

+ 6 - 0
arch/arm/include/asm/setup.h

@@ -25,4 +25,10 @@ extern int arm_add_memory(u64 start, u64 size);
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);

+#ifdef CONFIG_ATAGS_PROC
+extern void save_atags(const struct tag *tags);
+#else
+static inline void save_atags(const struct tag *tags) { }
+#endif
+
 #endif

+ 2 - 2
arch/arm/kernel/Makefile

@@ -73,7 +73,6 @@ obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_xscale.o perf_event_v6.o \
 				   perf_event_v7.o
-CFLAGS_pj4-cp0.o		:= -marm
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o
 obj-$(CONFIG_VDSO)		+= vdso.o
@@ -88,8 +87,9 @@ obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o

 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
 ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y				+= psci-call.o
 obj-$(CONFIG_SMP)		+= psci_smp.o
 endif

+obj-$(CONFIG_HAVE_ARM_SMCCC)	+= smccc-call.o
+
 extra-y := $(head-y) vmlinux.lds

+ 6 - 0
arch/arm/kernel/armksyms.c

@@ -16,6 +16,7 @@
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/arm-smccc.h>

 #include <asm/checksum.h>
 #include <asm/ftrace.h>
@@ -175,3 +176,8 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
 EXPORT_SYMBOL(__pv_phys_pfn_offset);
 EXPORT_SYMBOL(__pv_offset);
 #endif
+
+#ifdef CONFIG_HAVE_ARM_SMCCC
+EXPORT_SYMBOL(arm_smccc_smc);
+EXPORT_SYMBOL(arm_smccc_hvc);
+#endif

+ 0 - 6
arch/arm/kernel/atags.h

@@ -1,9 +1,3 @@
-#ifdef CONFIG_ATAGS_PROC
-extern void save_atags(struct tag *tags);
-#else
-static inline void save_atags(struct tag *tags) { }
-#endif
-
 void convert_to_tag_list(struct tag *tags);

 #ifdef CONFIG_ATAGS

+ 1 - 1
arch/arm/kernel/cpuidle.c

@@ -56,7 +56,7 @@ int arm_cpuidle_suspend(int index)
 	int cpu = smp_processor_id();

 	if (cpuidle_ops[cpu].suspend)
-		ret = cpuidle_ops[cpu].suspend(cpu, index);
+		ret = cpuidle_ops[cpu].suspend(index);

 	return ret;
 }

+ 1 - 1
arch/arm/kernel/entry-v7m.S

@@ -88,7 +88,7 @@ __pendsv_entry:
 	@ execute the pending work, including reschedule
 	get_thread_info tsk
 	mov	why, #0
-	b	ret_to_user
+	b	ret_to_user_from_irq
 ENDPROC(__pendsv_entry)

 /*

+ 4 - 0
arch/arm/kernel/pj4-cp0.c

@@ -66,9 +66,13 @@ static void __init pj4_cp_access_write(u32 value)

 	__asm__ __volatile__ (
 		"mcr	p15, 0, %1, c1, c0, 2\n\t"
+#ifdef CONFIG_THUMB2_KERNEL
+		"isb\n\t"
+#else
 		"mrc	p15, 0, %0, c1, c0, 2\n\t"
 		"mov	%0, %0\n\t"
 		"sub	pc, pc, #4\n\t"
+#endif
 		: "=r" (temp) : "r" (value));
 }

+ 0 - 31
arch/arm/kernel/psci-call.S

@@ -1,31 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2015 ARM Limited
- *
- * Author: Mark Rutland <mark.rutland@arm.com>
- */
-
-#include <linux/linkage.h>
-
-#include <asm/opcodes-sec.h>
-#include <asm/opcodes-virt.h>
-
-/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
-ENTRY(__invoke_psci_fn_hvc)
-	__HVC(0)
-	bx	lr
-ENDPROC(__invoke_psci_fn_hvc)
-
-/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
-ENTRY(__invoke_psci_fn_smc)
-	__SMC(0)
-	bx	lr
-ENDPROC(__invoke_psci_fn_smc)

+ 67 - 0
arch/arm/kernel/setup.c

@@ -375,6 +375,72 @@ void __init early_print(const char *str, ...)
 	printk("%s", buf);
 }

+#ifdef CONFIG_ARM_PATCH_IDIV
+
+static inline u32 __attribute_const__ sdiv_instruction(void)
+{
+	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+		/* "sdiv r0, r0, r1" */
+		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
+		return __opcode_to_mem_thumb32(insn);
+	}
+
+	/* "sdiv r0, r0, r1" */
+	return __opcode_to_mem_arm(0xe710f110);
+}
+
+static inline u32 __attribute_const__ udiv_instruction(void)
+{
+	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+		/* "udiv r0, r0, r1" */
+		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
+		return __opcode_to_mem_thumb32(insn);
+	}
+
+	/* "udiv r0, r0, r1" */
+	return __opcode_to_mem_arm(0xe730f110);
+}
+
+static inline u32 __attribute_const__ bx_lr_instruction(void)
+{
+	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+		/* "bx lr; nop" */
+		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
+		return __opcode_to_mem_thumb32(insn);
+	}
+
+	/* "bx lr" */
+	return __opcode_to_mem_arm(0xe12fff1e);
+}
+
+static void __init patch_aeabi_idiv(void)
+{
+	extern void __aeabi_uidiv(void);
+	extern void __aeabi_idiv(void);
+	uintptr_t fn_addr;
+	unsigned int mask;
+
+	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
+	if (!(elf_hwcap & mask))
+		return;
+
+	pr_info("CPU: div instructions available: patching division code\n");
+
+	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
+	((u32 *)fn_addr)[0] = udiv_instruction();
+	((u32 *)fn_addr)[1] = bx_lr_instruction();
+	flush_icache_range(fn_addr, fn_addr + 8);
+
+	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
+	((u32 *)fn_addr)[0] = sdiv_instruction();
+	((u32 *)fn_addr)[1] = bx_lr_instruction();
+	flush_icache_range(fn_addr, fn_addr + 8);
+}
+
+#else
+static inline void patch_aeabi_idiv(void) { }
+#endif
+
 static void __init cpuid_init_hwcaps(void)
 {
 	int block;
@@ -642,6 +708,7 @@ static void __init setup_processor(void)
 	elf_hwcap = list->elf_hwcap;

 	cpuid_init_hwcaps();
+	patch_aeabi_idiv();

 #ifndef CONFIG_ARM_THUMB
 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);

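For scale: each patched helper shrinks to two instructions, so every EABI integer division in kernel C code benefits without recompilation. An illustrative snippet (not from this patch; the function name is made up) of a call site that is routed through __aeabi_uidiv():

	#include <linux/types.h>

	/* Hypothetical call site: on an EABI build where the compiler does not
	 * emit udiv itself, this '/' is lowered to a call to __aeabi_uidiv(),
	 * whose first two instructions become 'udiv r0, r0, r1; bx lr' after
	 * patch_aeabi_idiv() has run. */
	static u32 example_chunks(u32 bytes, u32 chunk_size)
	{
		return bytes / chunk_size;
	}
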
+ 62 - 0
arch/arm/kernel/smccc-call.S

@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/linkage.h>
+
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+#include <asm/unwind.h>
+
+	/*
+	 * Wrap c macros in asm macros to delay expansion until after the
+	 * SMCCC asm macro is expanded.
+	 */
+	.macro SMCCC_SMC
+	__SMC(0)
+	.endm
+
+	.macro SMCCC_HVC
+	__HVC(0)
+	.endm
+
+	.macro SMCCC instr
+UNWIND(	.fnstart)
+	mov	r12, sp
+	push	{r4-r7}
+UNWIND(	.save	{r4-r7})
+	ldm	r12, {r4-r7}
+	\instr
+	pop	{r4-r7}
+	ldr	r12, [sp, #(4 * 4)]
+	stm	r12, {r0-r3}
+	bx	lr
+UNWIND(	.fnend)
+	.endm
+
+/*
+ * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *		  unsigned long a3, unsigned long a4, unsigned long a5,
+ *		  unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_smc)
+	SMCCC SMCCC_SMC
+ENDPROC(arm_smccc_smc)
+
+/*
+ * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *		  unsigned long a3, unsigned long a4, unsigned long a5,
+ *		  unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_hvc)
+	SMCCC SMCCC_HVC
+ENDPROC(arm_smccc_hvc)

+ 7 - 10
arch/arm/kernel/smp.c

@@ -69,11 +69,15 @@ enum ipi_msg_type {
 	IPI_TIMER,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
-	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 	IPI_IRQ_WORK,
 	IPI_COMPLETION,
-	IPI_CPU_BACKTRACE = 15,
+	IPI_CPU_BACKTRACE,
+	/*
+	 * SGI8-15 can be reserved by secure firmware, and thus may
+	 * not be usable by the kernel. Please keep the above limited
+	 * to at most 8 entries.
+	 */
 };

 static DECLARE_COMPLETION(cpu_running);
@@ -475,7 +479,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 	S(IPI_CALL_FUNC, "Function call interrupts"),
-	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
 	S(IPI_IRQ_WORK, "IRQ work interrupts"),
 	S(IPI_COMPLETION, "completion interrupts"),
@@ -525,7 +528,7 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask)

 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
 }

 #ifdef CONFIG_IRQ_WORK
@@ -620,12 +623,6 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		irq_exit();
 		break;

-	case IPI_CALL_FUNC_SINGLE:
-		irq_enter();
-		generic_smp_call_function_single_interrupt();
-		irq_exit();
-		break;
-
 	case IPI_CPU_STOP:
 		irq_enter();
 		ipi_cpu_stop(cpu);

+ 1 - 1
arch/arm/kernel/vdso.c

@@ -224,7 +224,7 @@ static int install_vvar(struct mm_struct *mm, unsigned long addr)
 				       VM_READ | VM_MAYREAD,
 				       &vdso_data_mapping);

-	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
+	return PTR_ERR_OR_ZERO(vma);
 }

 /* assumes mmap_sem is write-locked */

+ 8 - 0
arch/arm/lib/lib1funcs.S

@@ -205,6 +205,10 @@ Boston, MA 02111-1307, USA.  */
 .endm


+#ifdef CONFIG_ARM_PATCH_IDIV
+	.align	3
+#endif
+
 ENTRY(__udivsi3)
 ENTRY(__aeabi_uidiv)
 UNWIND(.fnstart)
@@ -253,6 +257,10 @@ UNWIND(.fnstart)
 UNWIND(.fnend)
 ENDPROC(__umodsi3)

+#ifdef CONFIG_ARM_PATCH_IDIV
+	.align 3
+#endif
+
 ENTRY(__divsi3)
 ENTRY(__aeabi_idiv)
 UNWIND(.fnstart)

+ 11 - 1
arch/arm/mach-omap2/board-generic.c

@@ -16,6 +16,7 @@
 #include <linux/of_platform.h>
 #include <linux/irqdomain.h>

+#include <asm/setup.h>
 #include <asm/mach/arch.h>

 #include "common.h"
@@ -76,8 +77,17 @@ static const char *const n900_boards_compat[] __initconst = {
 	NULL,
 };

+/* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags,
+ * save them while the data is still not overwritten
+ */
+static void __init rx51_reserve(void)
+{
+	save_atags((const struct tag *)(PAGE_OFFSET + 0x100));
+	omap_reserve();
+}
+
 DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
-	.reserve	= omap_reserve,
+	.reserve	= rx51_reserve,
 	.map_io		= omap3_map_io,
 	.init_early	= omap3430_init_early,
 	.init_machine	= omap_generic_init,

+ 30 - 3
arch/arm/mm/cache-l2x0.c

@@ -790,7 +790,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
 };

 static int __init __l2c_init(const struct l2c_init_data *data,
-			     u32 aux_val, u32 aux_mask, u32 cache_id)
+			     u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
 {
 	struct outer_cache_fns fns;
 	unsigned way_size_bits, ways;
@@ -866,6 +866,10 @@ static int __init __l2c_init(const struct l2c_init_data *data,
 	fns.configure = outer_cache.configure;
 	if (data->fixup)
 		data->fixup(l2x0_base, cache_id, &fns);
+	if (nosync) {
+		pr_info("L2C: disabling outer sync\n");
+		fns.sync = NULL;
+	}

 	/*
 	 * Check if l2x0 controller is already enabled.  If we are booting
@@ -925,7 +929,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	if (data->save)
 		data->save(l2x0_base);

-	__l2c_init(data, aux_val, aux_mask, cache_id);
+	__l2c_init(data, aux_val, aux_mask, cache_id, false);
 }

 #ifdef CONFIG_OF
@@ -1060,6 +1064,18 @@ static void __init l2x0_of_parse(const struct device_node *np,
 		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
 	}

+	if (of_property_read_bool(np, "arm,parity-enable")) {
+		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+		val |= L2C_AUX_CTRL_PARITY_ENABLE;
+	} else if (of_property_read_bool(np, "arm,parity-disable")) {
+		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+	}
+
+	if (of_property_read_bool(np, "arm,shared-override")) {
+		mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+		val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+	}
+
 	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
 	if (ret)
 		return;
@@ -1176,6 +1192,14 @@ static void __init l2c310_of_parse(const struct device_node *np,
 		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
 	}

+	if (of_property_read_bool(np, "arm,parity-enable")) {
+		*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
+		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+	} else if (of_property_read_bool(np, "arm,parity-disable")) {
+		*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
+	}
+
 	prefetch = l2x0_saved_regs.prefetch_ctrl;

 	ret = of_property_read_u32(np, "arm,double-linefill", &val);
@@ -1704,6 +1728,7 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	struct resource res;
 	u32 cache_id, old_aux;
 	u32 cache_level = 2;
+	bool nosync = false;

 	np = of_find_matching_node(NULL, l2x0_ids);
 	if (!np)
@@ -1742,6 +1767,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	if (cache_level != 2)
 		pr_err("L2C: device tree specifies invalid cache level\n");

+	nosync = of_property_read_bool(np, "arm,outer-sync-disable");
+
 	/* Read back current (default) hardware configuration */
 	if (data->save)
 		data->save(l2x0_base);
@@ -1756,6 +1783,6 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	else
 		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

-	return __l2c_init(data, aux_val, aux_mask, cache_id);
+	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
 }
 #endif

+ 1 - 12
arch/arm/mm/cache-uniphier.c

@@ -377,17 +377,6 @@ static const struct of_device_id uniphier_cache_match[] __initconst = {
 	{ /* sentinel */ }
 };

-static struct device_node * __init uniphier_cache_get_next_level_node(
-							struct device_node *np)
-{
-	u32 phandle;
-
-	if (of_property_read_u32(np, "next-level-cache", &phandle))
-		return NULL;
-
-	return of_find_node_by_phandle(phandle);
-}
-
 static int __init __uniphier_cache_init(struct device_node *np,
 					unsigned int *cache_level)
 {
@@ -491,7 +480,7 @@ static int __init __uniphier_cache_init(struct device_node *np,
 	 * next level cache fails because we want to continue with available
 	 * cache levels.
 	 */
-	next_np = uniphier_cache_get_next_level_node(np);
+	next_np = of_find_next_cache_node(np);
 	if (next_np) {
 		(*cache_level)++;
 		ret = __uniphier_cache_init(next_np, cache_level);

+ 1 - 1
arch/arm/mm/mmu.c

@@ -572,7 +572,7 @@ static void __init build_mem_type_table(void)
 	 * in the Short-descriptor translation table format descriptors.
 	 */
 	if (cpu_arch == CPU_ARCH_ARMv7 &&
-		(read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+		(read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
 		user_pmd_table |= PMD_PXNTABLE;
 	}
 #endif

+ 16 - 7
arch/arm/mm/proc-v7.S

@@ -274,10 +274,12 @@ __v7_ca15mp_setup:
 __v7_b15mp_setup:
 __v7_ca17mp_setup:
 	mov	r10, #0
-1:	adr	r12, __v7_setup_stack		@ the local stack
-	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
+1:	adr	r0, __v7_setup_stack_ptr
+	ldr	r12, [r0]
+	add	r12, r12, r0			@ the local stack
+	stmia	r12, {r1-r6, lr}		@ v7_invalidate_l1 touches r0-r6
 	bl      v7_invalidate_l1
-	ldmia	r12, {r0-r5, lr}
+	ldmia	r12, {r1-r6, lr}
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
 	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
@@ -415,10 +417,12 @@ __v7_pj4b_setup:
 #endif /* CONFIG_CPU_PJ4B */

 __v7_setup:
-	adr	r12, __v7_setup_stack		@ the local stack
-	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
+	adr	r0, __v7_setup_stack_ptr
+	ldr	r12, [r0]
+	add	r12, r12, r0			@ the local stack
+	stmia	r12, {r1-r6, lr}		@ v7_invalidate_l1 touches r0-r6
 	bl      v7_invalidate_l1
-	ldmia	r12, {r0-r5, lr}
+	ldmia	r12, {r1-r6, lr}

 __v7_setup_cont:
 	and	r0, r9, #0xff000000		@ ARM?
@@ -480,11 +484,16 @@ __errata_finish:
 	orr	r0, r0, r6			@ set them
 THUMB(	orr	r0, r0, #1 << 30	)	@ Thumb exceptions
 	ret	lr				@ return to head.S:__ret
+
+	.align	2
+__v7_setup_stack_ptr:
+	.word	__v7_setup_stack - .
 ENDPROC(__v7_setup)
 ENDPROC(__v7_setup)
 
 
+	.bss
 	.align	2
 __v7_setup_stack:
-	.space	4 * 7				@ 12 registers
+	.space	4 * 7				@ 7 registers

 	__INITDATA

+ 5 - 9
arch/arm/mm/proc-v7m.S

@@ -12,6 +12,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/memory.h>
 #include <asm/v7m.h>
 #include "proc-macros.S"

@@ -97,19 +98,19 @@ __v7m_setup:
 	mov	r5, #0x00800000
 	str	r5, [r0, V7M_SCB_SHPR3]	@ set PendSV priority

-	@ SVC to run the kernel in this mode
+	@ SVC to switch to handler mode. Notice that this requires sp to
+	@ point to writeable memory because the processor saves
+	@ some registers to the stack.
 	badr	r1, 1f
 	ldr	r5, [r12, #11 * 4]	@ read the SVC vector entry
 	str	r1, [r12, #11 * 4]	@ write the temporary SVC vector entry
 	mov	r6, lr			@ save LR
-	mov	r7, sp			@ save SP
-	ldr	sp, =__v7m_setup_stack_top
+	ldr	sp, =init_thread_union + THREAD_START_SP
 	cpsie	i
 	svc	#0
 1:	cpsid	i
 	str	r5, [r12, #11 * 4]	@ restore the original SVC vector entry
 	mov	lr, r6			@ restore LR
-	mov	sp, r7			@ restore SP

 	@ Special-purpose control register
 	mov	r1, #1
@@ -123,11 +124,6 @@ __v7m_setup:
 	ret	lr
 ENDPROC(__v7m_setup)

-	.align 2
-__v7m_setup_stack:
-	.space	4 * 8				@ 8 registers
-__v7m_setup_stack_top:
-
 	define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

 	.section ".rodata"

+ 1 - 0
arch/arm64/Kconfig

@@ -92,6 +92,7 @@ config ARM64
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_CONTEXT_TRACKING
+	select HAVE_ARM_SMCCC
 	help
 	  ARM 64-bit (AArch64) Linux support.

+ 2 - 2
arch/arm64/kernel/Makefile

@@ -14,10 +14,10 @@ CFLAGS_REMOVE_return_address.o = -pg
 arm64-obj-y		:= debug-monitors.o entry.o irq.o fpsimd.o		\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
-			   hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o	\
+			   hyp-stub.o psci.o cpu_ops.o insn.o	\
 			   return_address.o cpuinfo.o cpu_errata.o		\
 			   cpufeature.o alternative.o cacheinfo.o		\
-			   smp.o smp_spin_table.o topology.o
+			   smp.o smp_spin_table.o topology.o smccc-call.o

 extra-$(CONFIG_EFI)			:= efi-entry.o

+ 5 - 0
arch/arm64/kernel/arm64ksyms.c

@@ -26,6 +26,7 @@
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/arm-smccc.h>

 #include <asm/checksum.h>

@@ -68,3 +69,7 @@ EXPORT_SYMBOL(test_and_change_bit);
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(_mcount);
 #endif
+
+	/* arm-smccc */
+EXPORT_SYMBOL(arm_smccc_smc);
+EXPORT_SYMBOL(arm_smccc_hvc);

+ 3 - 0
arch/arm64/kernel/asm-offsets.c

@@ -28,6 +28,7 @@
 #include <asm/suspend.h>
 #include <asm/vdso_datapage.h>
 #include <linux/kbuild.h>
+#include <linux/arm-smccc.h>

 int main(void)
 {
@@ -161,5 +162,7 @@ int main(void)
   DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
   DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
 #endif
+  DEFINE(ARM_SMCCC_RES_X0_OFFS,	offsetof(struct arm_smccc_res, a0));
+  DEFINE(ARM_SMCCC_RES_X2_OFFS,	offsetof(struct arm_smccc_res, a2));
   return 0;
 }

+ 0 - 28
arch/arm64/kernel/psci-call.S

@@ -1,28 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2015 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#include <linux/linkage.h>
-
-/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
-ENTRY(__invoke_psci_fn_hvc)
-	hvc	#0
-	ret
-ENDPROC(__invoke_psci_fn_hvc)
-
-/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
-ENTRY(__invoke_psci_fn_smc)
-	smc	#0
-	ret
-ENDPROC(__invoke_psci_fn_smc)

+ 43 - 0
arch/arm64/kernel/smccc-call.S

@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+	.macro SMCCC instr
+	.cfi_startproc
+	\instr	#0
+	ldr	x4, [sp]
+	stp	x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
+	stp	x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
+	ret
+	.cfi_endproc
+	.endm
+
+/*
+ * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *		  unsigned long a3, unsigned long a4, unsigned long a5,
+ *		  unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_smc)
+	SMCCC	smc
+ENDPROC(arm_smccc_smc)
+
+/*
+ * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *		  unsigned long a3, unsigned long a4, unsigned long a5,
+ *		  unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_hvc)
+	SMCCC	hvc
+ENDPROC(arm_smccc_hvc)

+ 3 - 0
drivers/firmware/Kconfig

@@ -173,6 +173,9 @@ config QCOM_SCM_64
 	def_bool y
 	depends on QCOM_SCM && ARM64

+config HAVE_ARM_SMCCC
+	bool
+
 source "drivers/firmware/broadcom/Kconfig"
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"

+ 21 - 2
drivers/firmware/psci.c

@@ -13,6 +13,7 @@

 #define pr_fmt(fmt) "psci: " fmt

+#include <linux/arm-smccc.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
 #include <linux/of.h>
@@ -58,8 +59,6 @@ struct psci_operations psci_ops;

 typedef unsigned long (psci_fn)(unsigned long, unsigned long,
 				unsigned long, unsigned long);
-asmlinkage psci_fn __invoke_psci_fn_hvc;
-asmlinkage psci_fn __invoke_psci_fn_smc;
 static psci_fn *invoke_psci_fn;

 enum psci_function {
@@ -107,6 +106,26 @@ bool psci_power_state_is_valid(u32 state)
 	return !(state & ~valid_mask);
 }

+static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+			unsigned long arg0, unsigned long arg1,
+			unsigned long arg2)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+	return res.a0;
+}
+
+static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+			unsigned long arg0, unsigned long arg1,
+			unsigned long arg2)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+	return res.a0;
+}
+
 static int psci_to_linux_errno(int errno)
 {
 	switch (errno) {

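To trace the full path: a PSCI query such as PSCI_VERSION now goes through these wrappers and lands in the common arm_smccc_smc()/arm_smccc_hvc() implementations added above. A sketch in the style of this file (the function shown is illustrative, not part of the diff):

	#include <uapi/linux/psci.h>

	/* invoke_psci_fn points at __invoke_psci_fn_smc or __invoke_psci_fn_hvc,
	 * chosen from the firmware-described conduit as before. */
	static u32 example_psci_get_version(void)
	{
		return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
	}
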
+ 5 - 5
drivers/soc/qcom/spm.c

@@ -116,7 +116,7 @@ static const struct spm_reg_data spm_reg_8064_cpu = {

 static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv);

-typedef int (*idle_fn)(int);
+typedef int (*idle_fn)(void);
 static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops);

 static inline void spm_register_write(struct spm_driver_data *drv,
@@ -179,10 +179,10 @@ static int qcom_pm_collapse(unsigned long int unused)
 	return -1;
 }

-static int qcom_cpu_spc(int cpu)
+static int qcom_cpu_spc(void)
 {
 	int ret;
-	struct spm_driver_data *drv = per_cpu(cpu_spm_drv, cpu);
+	struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv);

 	spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
 	ret = cpu_suspend(0, qcom_pm_collapse);
@@ -197,9 +197,9 @@ static int qcom_cpu_spc(int cpu)
 	return ret;
 }

-static int qcom_idle_enter(int cpu, unsigned long index)
+static int qcom_idle_enter(unsigned long index)
 {
-	return per_cpu(qcom_idle_ops, cpu)[index](cpu);
+	return __this_cpu_read(qcom_idle_ops)[index]();
 }

 static const struct of_device_id qcom_idle_state_match[] __initconst = {

+ 104 - 0
include/linux/arm-smccc.h

@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_ARM_SMCCC_H
+#define __LINUX_ARM_SMCCC_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+/*
+ * This file provides common defines for ARM SMC Calling Convention as
+ * specified in
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ */
+
+#define ARM_SMCCC_STD_CALL		0
+#define ARM_SMCCC_FAST_CALL		1
+#define ARM_SMCCC_TYPE_SHIFT		31
+
+#define ARM_SMCCC_SMC_32		0
+#define ARM_SMCCC_SMC_64		1
+#define ARM_SMCCC_CALL_CONV_SHIFT	30
+
+#define ARM_SMCCC_OWNER_MASK		0x3F
+#define ARM_SMCCC_OWNER_SHIFT		24
+
+#define ARM_SMCCC_FUNC_MASK		0xFFFF
+
+#define ARM_SMCCC_IS_FAST_CALL(smc_val)	\
+	((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT))
+#define ARM_SMCCC_IS_64(smc_val) \
+	((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT))
+#define ARM_SMCCC_FUNC_NUM(smc_val)	((smc_val) & ARM_SMCCC_FUNC_MASK)
+#define ARM_SMCCC_OWNER_NUM(smc_val) \
+	(((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK)
+
+#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \
+	(((type) << ARM_SMCCC_TYPE_SHIFT) | \
+	((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \
+	(((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
+	((func_num) & ARM_SMCCC_FUNC_MASK))
+
+#define ARM_SMCCC_OWNER_ARCH		0
+#define ARM_SMCCC_OWNER_CPU		1
+#define ARM_SMCCC_OWNER_SIP		2
+#define ARM_SMCCC_OWNER_OEM		3
+#define ARM_SMCCC_OWNER_STANDARD	4
+#define ARM_SMCCC_OWNER_TRUSTED_APP	48
+#define ARM_SMCCC_OWNER_TRUSTED_APP_END	49
+#define ARM_SMCCC_OWNER_TRUSTED_OS	50
+#define ARM_SMCCC_OWNER_TRUSTED_OS_END	63
+
+/**
+ * struct arm_smccc_res - Result from SMC/HVC call
+ * @a0-a3 result values from registers 0 to 3
+ */
+struct arm_smccc_res {
+	unsigned long a0;
+	unsigned long a1;
+	unsigned long a2;
+	unsigned long a3;
+};
+
+/**
+ * arm_smccc_smc() - make SMC calls
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This function is used to make SMC calls following SMC Calling Convention.
+ * The content of the supplied param are copied to registers 0 to 7 prior
+ * to the SMC instruction. The return values are updated with the content
+ * from register 0 to 3 on return from the SMC instruction.
+ */
+asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
+			unsigned long a2, unsigned long a3, unsigned long a4,
+			unsigned long a5, unsigned long a6, unsigned long a7,
+			struct arm_smccc_res *res);
+
+/**
+ * arm_smccc_hvc() - make HVC calls
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This function is used to make HVC calls following SMC Calling
+ * Convention.  The content of the supplied param are copied to registers 0
+ * to 7 prior to the HVC instruction. The return values are updated with
+ * the content from register 0 to 3 on return from the HVC instruction.
+ */
+asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
+			unsigned long a2, unsigned long a3, unsigned long a4,
+			unsigned long a5, unsigned long a6, unsigned long a7,
+			struct arm_smccc_res *res);
+
+#endif /*__LINUX_ARM_SMCCC_H*/
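Taken together, the header gives callers everything needed to compose a function ID and issue a call. A minimal usage sketch (the SiP function number 0x20 and the function name are made up for illustration; only the macros and arm_smccc_smc() come from the header above):

	#include <linux/arm-smccc.h>

	/* Hypothetical SiP service: fast call, 32-bit convention, function 0x20. */
	#define EXAMPLE_SIP_FN						\
		ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32,	\
				   ARM_SMCCC_OWNER_SIP, 0x20)

	static unsigned long example_sip_query(unsigned long arg)
	{
		struct arm_smccc_res res;

		/* unused argument registers are simply passed as zero */
		arm_smccc_smc(EXAMPLE_SIP_FN, arg, 0, 0, 0, 0, 0, 0, &res);
		return res.a0;	/* result registers come back in res.a0-a3 */
	}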