
Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "Included in this update are:

   - Patches from Gregory Clement to fix the coherent DMA cases in our
     dma-mapping code.

   - A number of CPU errata updates and fixes.

   - ARM cpuidle improvements from Jisheng Zhang.

   - Fix from Kees for the location of _etext.

   - Cleanups from Masahiro Yamada to avoid duplicated messages during
     the kernel build, and remove CONFIG_ARCH_HAS_BARRIERS.

   - Remove a udelay loop limitation, allowing for faster CPUs to
     calibrate the delay correctly.

   - Cleanup some left-overs from the SW PAN implementation.

   - Ensure that a modified address limit is not visible to exception
     handlers"

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (21 commits)
  ARM: 8586/1: cpuidle: make arm_cpuidle_suspend() a bit more efficient
  ARM: 8585/1: cpuidle: fix !cpuidle_ops[cpu].init case during init
  ARM: 8561/4: dma-mapping: Fix the coherent case when iommu is used
  ARM: 8561/3: dma-mapping: Don't use outer_flush_range when the L2C is coherent
  ARM: 8560/1: errata: Workaround errata A12 825619 / A17 852421
  ARM: 8559/1: errata: Workaround erratum A12 821420
  ARM: 8558/1: errata: Workaround errata A12 818325/852422 A17 852423
  ARM: save and reset the address limit when entering an exception
  ARM: 8577/1: Fix Cortex-A15 798181 errata initialization
  ARM: 8584/1: floppy: avoid gcc-6 warning
  ARM: 8583/1: mm: fix location of _etext
  ARM: 8582/1: remove unused CONFIG_ARCH_HAS_BARRIERS
  ARM: 8306/1: loop_udelay: remove bogomips value limitation
  ARM: 8581/1: add missing <asm/prom.h> to arch/arm/kernel/devtree.c
  ARM: 8576/1: avoid duplicating "Kernel: arch/arm/boot/*Image is ready"
  ARM: 8556/1: on a generic DT system: do not touch l2x0
  ARM: uaccess: remove put_user() code duplication
  ARM: 8580/1: Remove orphaned __addr_ok() definition
  ARM: get rid of horrible *(unsigned int *)(regs + 1)
  ARM: introduce svc_pt_regs structure
  ...
Linus Torvalds · 9 years ago · commit b5f00d18cc

+ 54 - 0
arch/arm/Kconfig

@@ -1186,6 +1186,60 @@ config ARM_ERRATA_773022
	  loop buffer may deliver incorrect instructions. This
	  workaround disables the loop buffer to avoid the erratum.

+config ARM_ERRATA_818325_852422
+	bool "ARM errata: A12: some seqs of opposed cond code instrs => deadlock or corruption"
+	depends on CPU_V7
+	help
+	  This option enables the workaround for:
+	  - Cortex-A12 818325: Execution of an UNPREDICTABLE STR or STM
+	    instruction might deadlock.  Fixed in r0p1.
+	  - Cortex-A12 852422: Execution of a sequence of instructions might
+	    lead to either a data corruption or a CPU deadlock.  Not fixed in
+	    any Cortex-A12 cores yet.
+	  The workaround for both errata involves setting bit[12] of the
+	  Feature Register. This bit disables an optimisation applied to a
+	  sequence of 2 instructions that use opposing condition codes.
+
+config ARM_ERRATA_821420
+	bool "ARM errata: A12: sequence of VMOV to core registers might lead to a dead lock"
+	depends on CPU_V7
+	help
+	  This option enables the workaround for the 821420 Cortex-A12
+	  (all revs) erratum. In very rare timing conditions, a sequence
+	  of VMOV to Core registers instructions, for which the second
+	  one is in the shadow of a branch or abort, can lead to a
+	  deadlock when the VMOV instructions are issued out-of-order.
+
+config ARM_ERRATA_825619
+	bool "ARM errata: A12: DMB NSHST/ISHST mixed ... might cause deadlock"
+	depends on CPU_V7
+	help
+	  This option enables the workaround for the 825619 Cortex-A12
+	  (all revs) erratum. Within rare timing constraints, executing a
+	  DMB NSHST or DMB ISHST instruction followed by a mix of Cacheable
+	  and Device/Strongly-Ordered loads and stores might cause deadlock
+
+config ARM_ERRATA_852421
+	bool "ARM errata: A17: DMB ST might fail to create order between stores"
+	depends on CPU_V7
+	help
+	  This option enables the workaround for the 852421 Cortex-A17
+	  (r1p0, r1p1, r1p2) erratum. Under very rare timing conditions,
+	  execution of a DMB ST instruction might fail to properly order
+	  stores from GroupA and stores from GroupB.
+
+config ARM_ERRATA_852423
+	bool "ARM errata: A17: some seqs of opposed cond code instrs => deadlock or corruption"
+	depends on CPU_V7
+	help
+	  This option enables the workaround for:
+	  - Cortex-A17 852423: Execution of a sequence of instructions might
+	    lead to either a data corruption or a CPU deadlock.  Not fixed in
+	    any Cortex-A17 cores yet.
+	  This is identical to Cortex-A12 erratum 852422.  It is a separate
+	  config option from the A12 erratum due to the way errata are checked
+	  for and handled.
+
endmenu

source "arch/arm/common/Kconfig"

+ 1 - 0
arch/arm/Makefile

@@ -327,6 +327,7 @@ zImage: Image

$(BOOT_TARGETS): vmlinux
	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+	@$(kecho) '  Kernel: $(boot)/$@ is ready'

$(INSTALL_TARGETS):
	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@

+ 1 - 5
arch/arm/boot/Makefile

@@ -31,7 +31,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)

$(obj)/xipImage: vmlinux FORCE
	$(call if_changed,objcopy)
-	@$(kecho) '  Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
+	@$(kecho) '  Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)'

$(obj)/Image $(obj)/zImage: FORCE
	@echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
@@ -46,14 +46,12 @@ $(obj)/xipImage: FORCE

$(obj)/Image: vmlinux FORCE
	$(call if_changed,objcopy)
-	@$(kecho) '  Kernel: $@ is ready'

$(obj)/compressed/vmlinux: $(obj)/Image FORCE
	$(Q)$(MAKE) $(build)=$(obj)/compressed $@

$(obj)/zImage:	$(obj)/compressed/vmlinux FORCE
	$(call if_changed,objcopy)
-	@$(kecho) '  Kernel: $@ is ready'

endif

@@ -78,14 +76,12 @@ fi
$(obj)/uImage:	$(obj)/zImage FORCE
	@$(check_for_multiple_loadaddr)
	$(call if_changed,uimage)
-	@$(kecho) '  Image $@ is ready'

$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
	$(Q)$(MAKE) $(build)=$(obj)/bootp $@

$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
	$(call if_changed,objcopy)
-	@$(kecho) '  Kernel: $@ is ready'

PHONY += initrd install zinstall uinstall
initrd:

+ 2 - 2
arch/arm/include/asm/assembler.h

@@ -480,13 +480,13 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	.macro	uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	mrc	p15, 0, \tmp, c3, c0, 0
-	str	\tmp, [sp, #S_FRAME_SIZE]
+	str	\tmp, [sp, #SVC_DACR]
#endif
	.endm

	.macro	uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-	ldr	r0, [sp, #S_FRAME_SIZE]
+	ldr	r0, [sp, #SVC_DACR]
	mcr	p15, 0, r0, c3, c0, 0
#endif
	.endm

+ 1 - 3
arch/arm/include/asm/barrier.h

@@ -44,9 +44,7 @@ extern void arm_heavy_mb(void);
#define __arm_heavy_mb(x...) dsb(x)
#endif

-#ifdef CONFIG_ARCH_HAS_BARRIERS
-#include <mach/barriers.h>
-#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		__arm_heavy_mb()
#define rmb()		dsb()
#define wmb()		__arm_heavy_mb(st)

+ 3 - 3
arch/arm/include/asm/delay.h

@@ -10,8 +10,8 @@
#include <asm/param.h>	/* HZ */

#define MAX_UDELAY_MS	2
-#define UDELAY_MULT	((UL(2199023) * HZ) >> 11)
-#define UDELAY_SHIFT	30
+#define UDELAY_MULT	UL(2147 * HZ + 483648 * HZ / 1000000)
+#define UDELAY_SHIFT	31

#ifndef __ASSEMBLY__

@@ -34,7 +34,7 @@ extern struct arm_delay_ops {
 * it, it means that you're calling udelay() with an out of range value.
 *
 * With currently imposed limits, this means that we support a max delay
- * of 2000us. Further limits: HZ<=1000 and bogomips<=3355
+ * of 2000us. Further limits: HZ<=1000
 */
extern void __bad_udelay(void);
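
With UDELAY_SHIFT now 31, UDELAY_MULT is simply HZ * 2^31 / 10^6 spelled out in integer arithmetic: 2^31 / 10^6 = 2147.483648, hence the 2147 * HZ + 483648 * HZ / 1000000 split. A quick stand-alone check of that arithmetic (plain userspace C, not kernel code; the HZ values are just examples):

#include <stdio.h>

int main(void)
{
	unsigned long hz;

	/* UDELAY_MULT as the macro writes it vs. the exact HZ * 2^31 / 10^6 */
	for (hz = 100; hz <= 1000; hz *= 10) {
		unsigned long mult  = 2147UL * hz + 483648UL * hz / 1000000UL;
		unsigned long exact = (unsigned long)(((unsigned long long)hz << 31) / 1000000ULL);

		printf("HZ=%-4lu UDELAY_MULT=%-8lu HZ*2^31/10^6=%lu\n", hz, mult, exact);
	}
	return 0;
}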
 
 

+ 1 - 1
arch/arm/include/asm/floppy.h

@@ -17,7 +17,7 @@

#define fd_outb(val,port)			\
	do {					\
-		if ((port) == FD_DOR)		\
+		if ((port) == (u32)FD_DOR)	\
			fd_setdor((val));	\
		else				\
			outb((val),(port));	\

+ 10 - 0
arch/arm/include/asm/ptrace.h

@@ -13,10 +13,20 @@
#include <uapi/asm/ptrace.h>

#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
struct pt_regs {
	unsigned long uregs[18];
};
 
 
+struct svc_pt_regs {
+	struct pt_regs regs;
+	u32 dacr;
+	u32 addr_limit;
+};
+
+#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
+
#define user_mode(regs)	\
	(((regs)->ARM_cpsr & 0xf) == 0)
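
The saved SVC-mode registers now sit at the start of the larger svc_pt_regs frame, and to_svc_pt_regs() recovers the enclosing frame from a struct pt_regs pointer with container_of(). A stand-alone illustration of that pattern (userspace toy that mirrors the struct above rather than pulling in kernel headers):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pt_regs {
	unsigned long uregs[18];
};

struct svc_pt_regs {
	struct pt_regs regs;		/* embedded first, as in the patch above */
	unsigned int dacr;
	unsigned int addr_limit;
};

#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)

int main(void)
{
	struct svc_pt_regs frame = { .dacr = 0x51, .addr_limit = 0xbf000000 };
	struct pt_regs *regs = &frame.regs;	/* what a handler is handed */

	/* Walk back from the pt_regs pointer to the enclosing frame. */
	printf("dacr=%#x addr_limit=%#x\n",
	       to_svc_pt_regs(regs)->dacr, to_svc_pt_regs(regs)->addr_limit);
	return 0;
}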
 
 

+ 49 - 65
arch/arm/include/asm/uaccess.h

@@ -104,14 +104,6 @@ static inline void set_fs(mm_segment_t fs)

#define segment_eq(a, b)	((a) == (b))

-#define __addr_ok(addr) ({ \
-	unsigned long flag; \
-	__asm__("cmp %2, %0; movlo %0, #0" \
-		: "=&r" (flag) \
-		: "0" (current_thread_info()->addr_limit), "r" (addr) \
-		: "cc"); \
-	(flag == 0); })
-
/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
@@ -238,49 +230,23 @@ extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

-#define __put_user_x(__r2, __p, __e, __l, __s)				\
-	   __asm__ __volatile__ (					\
-		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
-		__asmeq("%3", "r1")					\
-		"bl	__put_user_" #__s				\
-		: "=&r" (__e)						\
-		: "0" (__p), "r" (__r2), "r" (__l)			\
-		: "ip", "lr", "cc")
-
-#define __put_user_check(x, p)						\
+#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
-		const typeof(*(p)) __user *__tmp_p = (p);		\
-		register const typeof(*(p)) __r2 asm("r2") = (x);	\
-		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
+		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
+		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
-		unsigned int __ua_flags = uaccess_save_and_enable();	\
-		switch (sizeof(*(__p))) {				\
-		case 1:							\
-			__put_user_x(__r2, __p, __e, __l, 1);		\
-			break;						\
-		case 2:							\
-			__put_user_x(__r2, __p, __e, __l, 2);		\
-			break;						\
-		case 4:							\
-			__put_user_x(__r2, __p, __e, __l, 4);		\
-			break;						\
-		case 8:							\
-			__put_user_x(__r2, __p, __e, __l, 8);		\
-			break;						\
-		default: __e = __put_user_bad(); break;			\
-		}							\
-		uaccess_restore(__ua_flags);				\
-		__e;							\
+		__asm__ __volatile__ (					\
+			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
+			__asmeq("%3", "r1")				\
+			"bl	__put_user_" #__s			\
+			: "=&r" (__e)					\
+			: "0" (__p), "r" (__r2), "r" (__l)		\
+			: "ip", "lr", "cc");				\
+		__err = __e;						\
	})

-#define put_user(x, p)							\
-	({								\
-		might_fault();						\
-		__put_user_check(x, p);					\
-	 })
-
#else /* CONFIG_MMU */

/*
@@ -298,7 +264,7 @@ static inline void set_fs(mm_segment_t fs)
}

#define get_user(x, p)	__get_user(x, p)
-#define put_user(x, p)	__put_user(x, p)
+#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

@@ -389,36 +355,54 @@ do {									\
#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)

+
+#define __put_user_switch(x, ptr, __err, __fn)				\
+	do {								\
+		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
+		__typeof__(*(ptr)) __pu_val = (x);			\
+		unsigned int __ua_flags;				\
+		might_fault();						\
+		__ua_flags = uaccess_save_and_enable();			\
+		switch (sizeof(*(ptr))) {				\
+		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
+		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
+		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
+		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
+		default: __err = __put_user_bad(); break;		\
+		}							\
+		uaccess_restore(__ua_flags);				\
+	} while (0)
+
+#define put_user(x, ptr)						\
+({									\
+	int __pu_err = 0;						\
+	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
+	__pu_err;							\
+})
+
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
-	__put_user_err((x), (ptr), __pu_err);				\
+	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
-	__put_user_err((x), (ptr), err);				\
+	__put_user_switch((x), (ptr), (err), __put_user_nocheck);	\
	(void) 0;							\
})

-#define __put_user_err(x, ptr, err)					\
-do {									\
-	unsigned long __pu_addr = (unsigned long)(ptr);			\
-	unsigned int __ua_flags;					\
-	__typeof__(*(ptr)) __pu_val = (x);				\
-	__chk_user_ptr(ptr);						\
-	might_fault();							\
-	__ua_flags = uaccess_save_and_enable();				\
-	switch (sizeof(*(ptr))) {					\
-	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
-	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
-	case 4: __put_user_asm_word(__pu_val, __pu_addr, err);	break;	\
-	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
-	default: __put_user_bad();					\
-	}								\
-	uaccess_restore(__ua_flags);					\
-} while (0)
+#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
+	do {								\
+		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
+		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
+	} while (0)
+
+#define __put_user_nocheck_1 __put_user_asm_byte
+#define __put_user_nocheck_2 __put_user_asm_half
+#define __put_user_nocheck_4 __put_user_asm_word
+#define __put_user_nocheck_8 __put_user_asm_dword
 
 
#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
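
The net effect of the rework is that the size dispatch lives in one place, __put_user_switch(), which brackets whichever per-size helper it is handed (__put_user_check for put_user(), __put_user_nocheck for __put_user()) with the uaccess enable/restore. The shape of that dispatch, as a hand-written userspace sketch rather than the real macros:

#include <stdio.h>
#include <string.h>

/* Toy per-size helpers standing in for the real __put_user_{1,2,4,8} calls. */
static int store_1(void *dst, unsigned long long v) { unsigned char  c = v; memcpy(dst, &c, 1); return 0; }
static int store_2(void *dst, unsigned long long v) { unsigned short s = v; memcpy(dst, &s, 2); return 0; }
static int store_4(void *dst, unsigned long long v) { unsigned int   i = v; memcpy(dst, &i, 4); return 0; }
static int store_8(void *dst, unsigned long long v) { memcpy(dst, &v, 8); return 0; }

/* One switch, parameterised by the per-size operation, as in __put_user_switch. */
#define put_user_switch(x, ptr, err, fn)			\
	do {							\
		switch (sizeof(*(ptr))) {			\
		case 1: (err) = fn##_1((ptr), (x)); break;	\
		case 2: (err) = fn##_2((ptr), (x)); break;	\
		case 4: (err) = fn##_4((ptr), (x)); break;	\
		case 8: (err) = fn##_8((ptr), (x)); break;	\
		default: (err) = -1; break;			\
		}						\
	} while (0)

int main(void)
{
	unsigned int dst = 0;
	int err;

	put_user_switch(0x1234u, &dst, err, store);	/* picks store_4 */
	printf("err=%d dst=%#x\n", err, dst);
	return 0;
}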

+ 4 - 1
arch/arm/kernel/asm-offsets.c

@@ -107,7 +107,10 @@ int main(void)
  DEFINE(S_PC,			offsetof(struct pt_regs, ARM_pc));
  DEFINE(S_PSR,			offsetof(struct pt_regs, ARM_cpsr));
  DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
-  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
+  DEFINE(PT_REGS_SIZE,		sizeof(struct pt_regs));
+  DEFINE(SVC_DACR,		offsetof(struct svc_pt_regs, dacr));
+  DEFINE(SVC_ADDR_LIMIT,	offsetof(struct svc_pt_regs, addr_limit));
+  DEFINE(SVC_REGS_SIZE,		sizeof(struct svc_pt_regs));
  BLANK();
#ifdef CONFIG_CACHE_L2X0
  DEFINE(L2X0_R_PHY_BASE,	offsetof(struct l2x0_regs, phy_base));

+ 13 - 10
arch/arm/kernel/cpuidle.c

@@ -47,18 +47,13 @@ int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 * This function calls the underlying arch specific low level PM code as
 * registered at the init time.
 *
- * Returns -EOPNOTSUPP if no suspend callback is defined, the result of the
- * callback otherwise.
+ * Returns the result of the suspend callback.
 */
int arm_cpuidle_suspend(int index)
{
-	int ret = -EOPNOTSUPP;
	int cpu = smp_processor_id();

-	if (cpuidle_ops[cpu].suspend)
-		ret = cpuidle_ops[cpu].suspend(index);
-
-	return ret;
+	return cpuidle_ops[cpu].suspend(index);
}

/**
@@ -92,7 +87,8 @@ static const struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
 * process.
 *
 * Return 0 on sucess, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
- * no cpuidle_ops is registered for the 'enable-method'.
+ * no cpuidle_ops is registered for the 'enable-method', or if either init or
+ * suspend callback isn't defined.
 */
static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
{
@@ -110,6 +106,12 @@ static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
		return -EOPNOTSUPP;
	}

+	if (!ops->init || !ops->suspend) {
+		pr_warn("cpuidle_ops '%s': no init or suspend callback\n",
+			enable_method);
+		return -EOPNOTSUPP;
+	}
+
	cpuidle_ops[cpu] = *ops; /* structure copy */

	pr_notice("cpuidle: enable-method property '%s'"
@@ -129,7 +131,8 @@ static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
 * Returns:
 *  0 on success,
 *  -ENODEV if it fails to find the cpu node in the device tree,
- *  -EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu,
+ *  -EOPNOTSUPP if it does not find a registered and valid cpuidle_ops for
+ *  this cpu,
 *  -ENOENT if it fails to find an 'enable-method' property,
 *  -ENXIO if the HW reports a failure or a misconfiguration,
 *  -ENOMEM if the HW report an memory allocation failure 
@@ -143,7 +146,7 @@ int __init arm_cpuidle_init(int cpu)
		return -ENODEV;

	ret = arm_cpuidle_read_ops(cpu_node, cpu);
-	if (!ret && cpuidle_ops[cpu].init)
+	if (!ret)
		ret = cpuidle_ops[cpu].init(cpu_node, cpu);

	of_node_put(cpu_node);
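
The init and suspend callbacks are now checked once, when the enable-method is resolved, so both arm_cpuidle_init() and arm_cpuidle_suspend() can call through them unconditionally. The general shape of that validate-at-registration pattern (a minimal userspace sketch, not the kernel's cpuidle_ops):

#include <stdio.h>
#include <errno.h>

struct idle_ops {
	int (*init)(int cpu);
	int (*suspend)(int index);
};

static int my_init(int cpu)      { (void)cpu; return 0; }
static int my_suspend(int index) { printf("suspend to state %d\n", index); return 0; }

static struct idle_ops registered;

/* Reject incomplete ops once, at registration time... */
static int register_ops(const struct idle_ops *ops)
{
	if (!ops->init || !ops->suspend)
		return -EOPNOTSUPP;
	registered = *ops;	/* structure copy, as with cpuidle_ops[cpu] */
	return registered.init(0);
}

/* ...so the hot path can call through without re-checking for NULL. */
static int enter_idle(int index)
{
	return registered.suspend(index);
}

int main(void)
{
	struct idle_ops ops = { .init = my_init, .suspend = my_suspend };

	if (register_ops(&ops) == 0)
		enter_idle(1);
	return 0;
}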

+ 3 - 0
arch/arm/kernel/devtree.c

@@ -23,6 +23,7 @@
#include <asm/cputype.h>
#include <asm/setup.h>
#include <asm/page.h>
+#include <asm/prom.h>
#include <asm/smp_plat.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -213,6 +214,8 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)

#if defined(CONFIG_ARCH_MULTIPLATFORM) || defined(CONFIG_ARM_SINGLE_ARMV7M)
	DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
+		.l2c_aux_val = 0x0,
+		.l2c_aux_mask = ~0x0,
	MACHINE_END

	mdesc_best = &__mach_desc_GENERIC_DT;

+ 12 - 7
arch/arm/kernel/entry-armv.S

@@ -92,7 +92,7 @@
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
-	sub	sp, sp, #S_FRAME_SIZE
+	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
@@ -152,7 +152,7 @@ ENDPROC(__und_invalid)
	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
+	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
-	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
+	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack
@@ -185,6 +185,12 @@ ENDPROC(__und_invalid)
	@
	stmia	r7, {r2 - r6}

+	get_thread_info tsk
+	ldr	r0, [tsk, #TI_ADDR_LIMIT]
+	mov	r1, #TASK_SIZE
+	str	r1, [tsk, #TI_ADDR_LIMIT]
+	str	r0, [sp, #SVC_ADDR_LIMIT]
+
	uaccess_save r0
	.if \uaccess
	uaccess_disable r0
@@ -213,7 +219,6 @@ __irq_svc:
	irq_handler

#ifdef CONFIG_PREEMPT
-	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
@@ -366,17 +371,17 @@ ENDPROC(__fiq_abt)
/*
 * User mode handlers
 *
- * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
+ * EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
 */

-#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
-	sub	sp, sp, #S_FRAME_SIZE
+	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)


+ 1 - 1
arch/arm/kernel/entry-common.S

@@ -145,7 +145,7 @@ ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
-	sub	sp, sp, #S_FRAME_SIZE
+	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr

+ 8 - 4
arch/arm/kernel/entry-header.S

@@ -90,7 +90,7 @@
	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

-	sub	sp, #S_FRAME_SIZE-S_IP
+	sub	sp, #PT_REGS_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
@@ -160,7 +160,7 @@
	ldmia	sp!, {r0-r11}

	@ restore main sp
-	add	sp, sp, #S_FRAME_SIZE-S_IP
+	add	sp, sp, #PT_REGS_SIZE-S_IP

	cpsie	i
	bx	lr
@@ -215,7 +215,9 @@
	blne	trace_hardirqs_off
#endif
	.endif
+	ldr	r1, [sp, #SVC_ADDR_LIMIT]
	uaccess_restore
+	str	r1, [tsk, #TI_ADDR_LIMIT]

#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode SVC restore
@@ -259,7 +261,9 @@
	@ on the stack remains correct).
	@
	.macro  svc_exit_via_fiq
+	ldr	r1, [sp, #SVC_ADDR_LIMIT]
	uaccess_restore
+	str	r1, [tsk, #TI_ADDR_LIMIT]
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r0, sp
@@ -307,7 +311,7 @@
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
-	add	sp, sp, #\offset + S_FRAME_SIZE
+	add	sp, sp, #\offset + PT_REGS_SIZE
	movs	pc, lr				@ return & move spsr_svc into cpsr
#elif defined(CONFIG_CPU_V7M)
	@ V7M restore.
@@ -334,7 +338,7 @@
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
-	add	sp, sp, #S_FRAME_SIZE - S_SP
+	add	sp, sp, #PT_REGS_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
#endif	/* !CONFIG_THUMB2_KERNEL */
	.endm
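
Taken together with the svc_entry hunk above, the bookkeeping amounts to: read the task's addr_limit, stash it in the new svc_pt_regs slot, force TASK_SIZE for the duration of the handler, and write the saved value back in svc_exit / svc_exit_via_fiq, alongside the existing DACR handling. Roughly, in C (a stand-alone sketch with made-up constants; the variables stand in for the thread_info field and the stack slot):

#include <stdio.h>

#define TASK_SIZE	0xbf000000UL	/* illustrative user/kernel split */
#define KERNEL_DS	0xffffffffUL

static unsigned long thread_addr_limit = KERNEL_DS;	/* e.g. set_fs(KERNEL_DS) was active */

static void exception_handler(void)
{
	/* always runs with the default (user) limit, whatever was interrupted */
	printf("handler sees addr_limit=%#lx\n", thread_addr_limit);
}

static void svc_entry_then_exit(void)
{
	unsigned long saved = thread_addr_limit;	/* str ..., [sp, #SVC_ADDR_LIMIT] */

	thread_addr_limit = TASK_SIZE;			/* reset to the safe default */
	exception_handler();
	thread_addr_limit = saved;			/* restored on exit */
}

int main(void)
{
	svc_entry_then_exit();
	printf("after exit addr_limit=%#lx\n", thread_addr_limit);
	return 0;
}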

+ 1 - 1
arch/arm/kernel/entry-v7m.S

@@ -73,7 +73,7 @@ __irq_entry:
	@ correctness they don't need to be restored. So only r8-r11 must be
	@ restored here. The easiest way to do so is to restore r0-r7, too.
	ldmia	sp!, {r0-r11}
-	add	sp, #S_FRAME_SIZE-S_IP
+	add	sp, #PT_REGS_SIZE-S_IP
	cpsie	i
	bx	lr
ENDPROC(__irq_entry)

+ 9 - 5
arch/arm/kernel/process.c

@@ -96,19 +96,23 @@ void __show_regs(struct pt_regs *regs)
	unsigned long flags;
	char buf[64];
#ifndef CONFIG_CPU_V7M
-	unsigned int domain;
+	unsigned int domain, fs;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Get the domain register for the parent context. In user
	 * mode, we don't save the DACR, so lets use what it should
	 * be. For other modes, we place it after the pt_regs struct.
	 */
-	if (user_mode(regs))
+	if (user_mode(regs)) {
		domain = DACR_UACCESS_ENABLE;
-	else
-		domain = *(unsigned int *)(regs + 1);
+		fs = get_fs();
+	} else {
+		domain = to_svc_pt_regs(regs)->dacr;
+		fs = to_svc_pt_regs(regs)->addr_limit;
+	}
#else
	domain = get_domain();
+	fs = get_fs();
#endif
#endif

@@ -144,7 +148,7 @@ void __show_regs(struct pt_regs *regs)
		if ((domain & domain_mask(DOMAIN_USER)) ==
		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
			segment = "none";
-		else if (get_fs() == get_ds())
+		else if (fs == get_ds())
			segment = "kernel";
		else
			segment = "user";

+ 1 - 1
arch/arm/kernel/setup.c

@@ -844,7 +844,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
-	kernel_code.end     = virt_to_phys(_etext - 1);
+	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);
 
 

+ 40 - 4
arch/arm/kernel/smp_tlb.c

@@ -93,17 +93,53 @@ void erratum_a15_798181_init(void)
	unsigned int revidr = read_cpuid(CPUID_REVIDR);

 	/* Brahma-B15 r0p0..r0p2 affected
 	/* Brahma-B15 r0p0..r0p2 affected
-	 * Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
-	if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2)
+	 * Cortex-A15 r0p0..r3p3 w/o ECO fix affected
+	 * Fixes applied to A15 with respect to the revision and revidr are:
+	 *
+	 * r0p0-r2p1: No fixes applied
+	 * r2p2,r2p3:
+	 *	REVIDR[4]: 798181 Moving a virtual page that is being accessed
+	 *		   by an active process can lead to unexpected behavior
+	 *	REVIDR[9]: Not defined
+	 * r2p4,r3p0,r3p1,r3p2:
+	 *	REVIDR[4]: 798181 Moving a virtual page that is being accessed
+	 *		   by an active process can lead to unexpected behavior
+	 *	REVIDR[9]: 798181 Moving a virtual page that is being accessed
+	 *		   by an active process can lead to unexpected behavior
+	 *		   - This is an update to a previously released ECO.
+	 * r3p3:
+	 *	REVIDR[4]: Reserved
+	 *	REVIDR[9]: 798181 Moving a virtual page that is being accessed
+	 *		   by an active process can lead to unexpected behavior
+	 *		   - This is an update to a previously released ECO.
+	 *
+	 * Handling:
+	 *	REVIDR[9] set -> No WA
+	 *	REVIDR[4] set, REVIDR[9] cleared -> Partial WA
+	 *	Both cleared -> Full WA
+	 */
+	if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2) {
		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
-	else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2 &&
-		 (revidr & 0x210) != 0x210) {
+	} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f2) {
+		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+	} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f4) {
		if (revidr & 0x10)
			erratum_a15_798181_handler =
				erratum_a15_798181_partial;
		else
			erratum_a15_798181_handler =
				erratum_a15_798181_broadcast;
+	} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x413fc0f3) {
+		if ((revidr & 0x210) == 0)
+			erratum_a15_798181_handler =
+				erratum_a15_798181_broadcast;
+		else if (revidr & 0x10)
+			erratum_a15_798181_handler =
+				erratum_a15_798181_partial;
+	} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x414fc0f0) {
+		if ((revidr & 0x200) == 0)
+			erratum_a15_798181_handler =
+				erratum_a15_798181_partial;
 	}
}
 }
 #endif
 #endif

+ 2 - 2
arch/arm/kernel/vmlinux.lds.S

@@ -125,6 +125,8 @@ SECTIONS
#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
+	_etext = .;			/* End of text section */
+
	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
@@ -155,8 +157,6 @@ SECTIONS

	NOTES

-	_etext = .;			/* End of text and rodata section */
-
#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else

+ 4 - 1
arch/arm/lib/Makefile

@@ -29,7 +29,10 @@ else
  lib-y	+= io-readsw-armv4.o io-writesw-armv4.o
endif

-lib-$(CONFIG_ARCH_RPC)		+= ecard.o io-acorn.o floppydma.o
+ifeq ($(CONFIG_ARCH_RPC),y)
+  lib-y				+= ecard.o io-acorn.o floppydma.o
+  AFLAGS_delay-loop.o		+= -march=armv4
+endif

$(obj)/csumpartialcopy.o:	$(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o:	$(obj)/csumpartialcopygeneric.S

+ 5 - 10
arch/arm/lib/delay-loop.S

@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/delay.h>
+
		.text

.LC0:		.word	loops_per_jiffy
@@ -17,7 +18,6 @@

/*
 * r0  <= 2000
- * lpj <= 0x01ffffff (max. 3355 bogomips)
 * HZ  <= 1000
 */

@@ -25,16 +25,11 @@ ENTRY(__loop_udelay)
		ldr	r2, .LC1
		mul	r0, r2, r0
ENTRY(__loop_const_udelay)			@ 0 <= r0 <= 0x7fffff06
-		mov	r1, #-1
		ldr	r2, .LC0
-		ldr	r2, [r2]		@ max = 0x01ffffff
-		add	r0, r0, r1, lsr #32-14
-		mov	r0, r0, lsr #14		@ max = 0x0001ffff
-		add	r2, r2, r1, lsr #32-10
-		mov	r2, r2, lsr #10		@ max = 0x00007fff
-		mul	r0, r2, r0		@ max = 2^32-1
-		add	r0, r0, r1, lsr #32-6
-		movs	r0, r0, lsr #6
+		ldr	r2, [r2]
+		umull	r1, r0, r2, r0
+		adds	r1, r1, #0xffffffff
+		adcs	r0, r0, r0
		reteq	lr
 
 
/*
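
The rewritten __loop_const_udelay forms the full 64-bit product of loops_per_jiffy and the scaled microsecond count with umull, then effectively divides by 2^31, the adds/adcs pair rounding any non-zero remainder up so the delay never comes out short; the old chain of 14-, 10- and 6-bit shifts, and with it the loops_per_jiffy/bogomips ceiling, is gone. In C the new arithmetic is roughly (a sketch assuming 32-bit operands, as in the assembly):

#include <stdio.h>
#include <stdint.h>

/* What the umull/adds/adcs sequence computes: 2*hi + (lo != 0), i.e. the
 * product divided by 2^31, rounded up whenever the low 32 bits are non-zero. */
static uint32_t const_udelay_loops(uint32_t lpj, uint32_t x)
{
	uint64_t prod = (uint64_t)lpj * x;
	uint32_t hi = (uint32_t)(prod >> 32);
	uint32_t lo = (uint32_t)prod;

	return 2 * hi + (lo != 0);
}

int main(void)
{
	uint32_t lpj = 12345678;	/* arbitrary loops_per_jiffy, no ceiling any more */
	uint32_t x   = 214748;		/* 1us worth: UDELAY_MULT at HZ=100 */

	printf("loops = %u\n", const_udelay_loops(lpj, x));
	return 0;
}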

+ 0 - 6
arch/arm/mm/Kconfig

@@ -1025,12 +1025,6 @@ config ARM_DMA_MEM_BUFFERABLE

	  You are recommended say 'Y' here and debug any affected drivers.

-config ARCH_HAS_BARRIERS
-	bool
-	help
-	  This option allows the use of custom mandatory barriers
-	  included via the mach/barriers.h file.
-
config ARM_HEAVY_MB
	bool


+ 105 - 39
arch/arm/mm/dma-mapping.c

@@ -49,6 +49,7 @@ struct arm_dma_alloc_args {
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
+	int coherent_flag;
};

struct arm_dma_free_args {
@@ -59,6 +60,9 @@ struct arm_dma_free_args {
	bool want_vaddr;
};

+#define NORMAL	    0
+#define COHERENT    1
+
struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
@@ -272,7 +276,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
	return mask;
}

-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
@@ -284,17 +288,21 @@ static void __dma_clear_buffer(struct page *page, size_t size)
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
-			dmac_flush_range(ptr, ptr + PAGE_SIZE);
+			if (coherent_flag != COHERENT)
+				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
-		outer_flush_range(base, end);
+		if (coherent_flag != COHERENT)
+			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
-		dmac_flush_range(ptr, ptr + size);
-		outer_flush_range(__pa(ptr), __pa(ptr) + size);
+		if (coherent_flag != COHERENT) {
+			dmac_flush_range(ptr, ptr + size);
+			outer_flush_range(__pa(ptr), __pa(ptr) + size);
+		}
	}
}

@@ -302,7 +310,8 @@ static void __dma_clear_buffer(struct page *page, size_t size)
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
-static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
+				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
@@ -318,7 +327,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

-	__dma_clear_buffer(page, size);
+	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}
@@ -340,7 +349,8 @@ static void __dma_free_buffer(struct page *page, size_t size)

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
-				     const void *caller, bool want_vaddr);
+				     const void *caller, bool want_vaddr,
+				     int coherent_flag);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
@@ -405,10 +415,13 @@ static int __init atomic_pool_init(void)
	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
-
+	/*
+	 * The atomic pool is only used for non-coherent allocations
+	 * so we must pass NORMAL for coherent_flag.
+	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
-					      &page, atomic_pool_init, true);
+				      &page, atomic_pool_init, true, NORMAL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
@@ -522,7 +535,11 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
{
	struct page *page;
	void *ptr = NULL;
-	page = __dma_alloc_buffer(dev, size, gfp);
+	/*
+	 * __alloc_remap_buffer is only called when the device is
+	 * non-coherent
+	 */
+	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
@@ -577,7 +594,8 @@ static int __free_from_pool(void *start, size_t size)

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
-				     const void *caller, bool want_vaddr)
+				     const void *caller, bool want_vaddr,
+				     int coherent_flag)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
@@ -588,7 +606,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
	if (!page)
		return NULL;

-	__dma_clear_buffer(page, size);
+	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;
@@ -638,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag)	NULL
#define __free_from_pool(cpu_addr, size)			do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)
@@ -649,7 +667,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
-	page = __dma_alloc_buffer(dev, size, gfp);
+	/* __alloc_simple_buffer is only called when the device is coherent */
+	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

@@ -679,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
-				       args->want_vaddr);
+				       args->want_vaddr, args->coherent_flag);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -746,6 +765,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		.prot = prot,
		.caller = caller,
		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
@@ -1253,7 +1273,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
static const int iommu_order_array[] = { 9, 8, 4, 0 };

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
-					  gfp_t gfp, struct dma_attrs *attrs)
+					  gfp_t gfp, struct dma_attrs *attrs,
+					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
@@ -1277,7 +1298,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
		if (!page)
			goto error;

-		__dma_clear_buffer(page, size);
+		__dma_clear_buffer(page, size, coherent_flag);

		for (i = 0; i < count; i++)
			pages[i] = page + i;
@@ -1327,7 +1348,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
				pages[i + j] = pages[i] + j;
		}

-		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}
@@ -1455,13 +1476,16 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
	return NULL;
}

-static void *__iommu_alloc_atomic(struct device *dev, size_t size,
-				  dma_addr_t *handle)
+static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
+				  dma_addr_t *handle, int coherent_flag)
{
	struct page *page;
	void *addr;

-	addr = __alloc_from_pool(size, &page);
+	if (coherent_flag  == COHERENT)
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else
+		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

@@ -1477,14 +1501,18 @@ err_mapping:
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
-				dma_addr_t handle, size_t size)
+			dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
-	__free_from_pool(cpu_addr, size);
+	if (coherent_flag == COHERENT)
+		__dma_free_buffer(virt_to_page(cpu_addr), size);
+	else
+		__free_from_pool(cpu_addr, size);
}

-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
-	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
+	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
+	    int coherent_flag)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
@@ -1493,8 +1521,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

-	if (!gfpflags_allow_blocking(gfp))
-		return __iommu_alloc_atomic(dev, size, handle);
+	if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
+		return __iommu_alloc_simple(dev, size, gfp, handle,
+					    coherent_flag);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
@@ -1505,7 +1534,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	 */
	gfp &= ~(__GFP_COMP);

-	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

@@ -1530,7 +1559,19 @@ err_buffer:
	return NULL;
}

-static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+		    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
+}
+
+static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
+		    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
+}
+
+static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
@@ -1540,8 +1581,6 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
	if (!pages)
		return -ENXIO;

@@ -1562,19 +1601,34 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,

	return 0;
}
+static int arm_iommu_mmap_attrs(struct device *dev,
+		struct vm_area_struct *vma, void *cpu_addr,
+		dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static int arm_coherent_iommu_mmap_attrs(struct device *dev,
+		struct vm_area_struct *vma, void *cpu_addr,
+		dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
 
 
 /*
 /*
  * free a page as defined by the above mapping.
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  * Must not be called with IRQs disabled.
  */
  */
-void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-			  dma_addr_t handle, struct dma_attrs *attrs)
+void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+	dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
 {
{
	struct page **pages;
	size = PAGE_ALIGN(size);

-		__iommu_free_atomic(dev, cpu_addr, handle, size);
+	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
 		return;
		return;
	}

 	__iommu_free_buffer(dev, pages, size, attrs);
	__iommu_free_buffer(dev, pages, size, attrs);
}

+		    void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
+}
+
+void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
+		    void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
+}
+
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
@@ -1997,9 +2063,9 @@ struct dma_map_ops iommu_ops = {
};

struct dma_map_ops iommu_coherent_ops = {
-	.alloc		= arm_iommu_alloc_attrs,
-	.free		= arm_iommu_free_attrs,
-	.mmap		= arm_iommu_mmap_attrs,
+	.alloc		= arm_coherent_iommu_alloc_attrs,
+	.free		= arm_coherent_iommu_free_attrs,
+	.mmap		= arm_coherent_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,

+ 43 - 0
arch/arm/mm/proc-v7.S

@@ -362,6 +362,39 @@ __ca15_errata:
#endif
	b	__errata_finish

+__ca12_errata:
+#ifdef CONFIG_ARM_ERRATA_818325_852422
+	mrc	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orr	r10, r10, #1 << 12		@ set bit #12
+	mcr	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_821420
+	mrc	p15, 0, r10, c15, c0, 2		@ read internal feature reg
+	orr	r10, r10, #1 << 1		@ set bit #1
+	mcr	p15, 0, r10, c15, c0, 2		@ write internal feature reg
+#endif
+#ifdef CONFIG_ARM_ERRATA_825619
+	mrc	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orr	r10, r10, #1 << 24		@ set bit #24
+	mcr	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
+	b	__errata_finish
+
+__ca17_errata:
+#ifdef CONFIG_ARM_ERRATA_852421
+	cmp	r6, #0x12			@ only present up to r1p2
+	mrcle	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orrle	r10, r10, #1 << 24		@ set bit #24
+	mcrle	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_852423
+	cmp	r6, #0x12			@ only present up to r1p2
+	mrcle	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orrle	r10, r10, #1 << 12		@ set bit #12
+	mcrle	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
+	b	__errata_finish
+
__v7_pj4b_setup:
#ifdef CONFIG_CPU_PJ4B

@@ -443,6 +476,16 @@ __v7_setup_cont:
	teq	r0, r10
	beq	__ca9_errata

+	/* Cortex-A12 Errata */
+	ldr	r10, =0x00000c0d		@ Cortex-A12 primary part number
+	teq	r0, r10
+	beq	__ca12_errata
+
+	/* Cortex-A17 Errata */
+	ldr	r10, =0x00000c0e		@ Cortex-A17 primary part number
+	teq	r0, r10
+	beq	__ca17_errata
+
	/* Cortex-A15 Errata */
	ldr	r10, =0x00000c0f		@ Cortex-A15 primary part number
	teq	r0, r10