
Merge branches 'cleanup', 'fixes', 'misc', 'omap-barrier' and 'uaccess' into for-linus

Russell King 10 years ago
100 changed files with 2204 additions and 370 deletions
  1. 17 1
      Documentation/arm/sunxi/README
  2. 6 0
      Documentation/devicetree/bindings/arm/l2cc.txt
  3. 2 0
      Documentation/devicetree/bindings/arm/sunxi.txt
  4. 1 0
      Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
  5. 11 2
      Documentation/power/swsusp.txt
  6. 21 5
      MAINTAINERS
  7. 1 1
      Makefile
  8. 24 0
      arch/arm/Kconfig
  9. 1 1
      arch/arm/Kconfig.debug
  10. 3 0
      arch/arm/Makefile
  11. 4 0
      arch/arm/boot/dts/am335x-boneblack.dts
  12. 7 0
      arch/arm/boot/dts/am4372.dtsi
  13. 4 0
      arch/arm/boot/dts/am57xx-beagle-x15.dts
  14. 1041 1
      arch/arm/boot/dts/atlas7.dtsi
  15. 23 2
      arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
  16. 4 8
      arch/arm/common/mcpm_platsmp.c
  17. 0 1
      arch/arm/configs/multi_v7_defconfig
  18. 5 1
      arch/arm/configs/sunxi_defconfig
  19. 0 1
      arch/arm/include/asm/Kbuild
  20. 47 0
      arch/arm/include/asm/assembler.h
  21. 10 3
      arch/arm/include/asm/barrier.h
  22. 12 12
      arch/arm/include/asm/bitops.h
  23. 17 4
      arch/arm/include/asm/cacheflush.h
  24. 1 1
      arch/arm/include/asm/dma-mapping.h
  25. 43 10
      arch/arm/include/asm/domain.h
  26. 14 1
      arch/arm/include/asm/fixmap.h
  27. 17 2
      arch/arm/include/asm/futex.h
  28. 0 2
      arch/arm/include/asm/glue-cache.h
  29. 58 17
      arch/arm/include/asm/io.h
  30. 2 2
      arch/arm/include/asm/memory.h
  31. 0 17
      arch/arm/include/asm/outercache.h
  32. 1 0
      arch/arm/include/asm/pgtable-2level-hwdef.h
  33. 30 1
      arch/arm/include/asm/pgtable-2level.h
  34. 1 1
      arch/arm/include/asm/smp.h
  35. 9 0
      arch/arm/include/asm/smp_plat.h
  36. 4 1
      arch/arm/include/asm/switch_to.h
  37. 0 3
      arch/arm/include/asm/thread_info.h
  38. 80 5
      arch/arm/include/asm/uaccess.h
  39. 9 3
      arch/arm/kernel/armksyms.c
  40. 25 9
      arch/arm/kernel/entry-armv.S
  41. 3 0
      arch/arm/kernel/entry-common.S
  42. 47 65
      arch/arm/kernel/entry-header.S
  43. 4 4
      arch/arm/kernel/head.S
  44. 1 0
      arch/arm/kernel/irq.c
  45. 5 2
      arch/arm/kernel/perf_event.c
  46. 40 16
      arch/arm/kernel/process.c
  47. 1 1
      arch/arm/kernel/reboot.c
  48. 5 1
      arch/arm/kernel/setup.c
  49. 17 4
      arch/arm/kernel/smp.c
  50. 3 0
      arch/arm/kernel/swp_emulate.c
  51. 0 1
      arch/arm/kernel/traps.c
  52. 3 4
      arch/arm/kernel/vdso.c
  53. 3 3
      arch/arm/lib/clear_user.S
  54. 3 3
      arch/arm/lib/copy_from_user.S
  55. 3 3
      arch/arm/lib/copy_to_user.S
  56. 14 0
      arch/arm/lib/csumpartialcopyuser.S
  57. 2 0
      arch/arm/lib/memcpy.S
  58. 2 0
      arch/arm/lib/memset.S
  59. 3 3
      arch/arm/lib/uaccess_with_memcpy.c
  60. 1 0
      arch/arm/mach-mmp/pm-pxa910.c
  61. 7 0
      arch/arm/mach-omap2/Kconfig
  62. 1 0
      arch/arm/mach-omap2/common.c
  63. 9 0
      arch/arm/mach-omap2/common.h
  64. 0 1
      arch/arm/mach-omap2/dma.c
  65. 0 33
      arch/arm/mach-omap2/include/mach/barriers.h
  66. 2 0
      arch/arm/mach-omap2/io.c
  67. 121 0
      arch/arm/mach-omap2/omap4-common.c
  68. 3 5
      arch/arm/mach-omap2/sleep44xx.S
  69. 1 0
      arch/arm/mach-prima2/Kconfig
  70. 1 0
      arch/arm/mach-prima2/pm.c
  71. 45 3
      arch/arm/mach-prima2/rtciobrg.c
  72. 1 1
      arch/arm/mach-shmobile/common.h
  73. 2 2
      arch/arm/mach-shmobile/platsmp.c
  74. 1 1
      arch/arm/mach-shmobile/smp-r8a7790.c
  75. 1 1
      arch/arm/mach-shmobile/smp-r8a7791.c
  76. 1 1
      arch/arm/mach-shmobile/smp-sh73a0.c
  77. 1 1
      arch/arm/mach-sunxi/Kconfig
  78. 4 1
      arch/arm/mach-sunxi/sunxi.c
  79. 1 0
      arch/arm/mach-ux500/cache-l2x0.c
  80. 4 0
      arch/arm/mm/Kconfig
  81. 1 0
      arch/arm/mm/abort-ev4.S
  82. 3 1
      arch/arm/mm/abort-ev5t.S
  83. 3 1
      arch/arm/mm/abort-ev5tj.S
  84. 5 3
      arch/arm/mm/abort-ev6.S
  85. 1 0
      arch/arm/mm/abort-ev7.S
  86. 2 0
      arch/arm/mm/abort-lv4t.S
  87. 6 8
      arch/arm/mm/abort-macro.S
  88. 1 5
      arch/arm/mm/cache-feroceon-l2.c
  89. 5 0
      arch/arm/mm/cache-l2x0.c
  90. 14 10
      arch/arm/mm/dma-mapping.c
  91. 32 0
      arch/arm/mm/dma.h
  92. 15 0
      arch/arm/mm/flush.c
  93. 3 3
      arch/arm/mm/highmem.c
  94. 23 10
      arch/arm/mm/ioremap.c
  95. 90 9
      arch/arm/mm/mmu.c
  96. 26 13
      arch/arm/mm/nommu.c
  97. 10 0
      arch/arm/mm/pgd.c
  98. 9 5
      arch/arm/mm/proc-v7.S
  99. 1 1
      arch/arm/vdso/Makefile
  100. 33 23
      arch/arm/vdso/vdsomunge.c

+ 17 - 1
Documentation/arm/sunxi/README

@@ -36,7 +36,7 @@ SunXi family
        + User Manual
          http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf

-      - Allwinner A23
+      - Allwinner A23 (sun8i)
        + Datasheet
          http://dl.linux-sunxi.org/A23/A23%20Datasheet%20V1.0%2020130830.pdf
        + User Manual
@@ -55,7 +55,23 @@ SunXi family
        + User Manual
          http://dl.linux-sunxi.org/A31/A3x_release_document/A31s/IC/A31s%20User%20Manual%20%20V1.0%2020130322.pdf

+      - Allwinner A33 (sun8i)
+        + Datasheet
+          http://dl.linux-sunxi.org/A33/A33%20Datasheet%20release%201.1.pdf
+        + User Manual
+          http://dl.linux-sunxi.org/A33/A33%20user%20manual%20release%201.1.pdf
+
+      - Allwinner H3 (sun8i)
+        + Datasheet
+          http://dl.linux-sunxi.org/H3/Allwinner_H3_Datasheet_V1.0.pdf
+
    * Quad ARM Cortex-A15, Quad ARM Cortex-A7 based SoCs
      - Allwinner A80
        + Datasheet
	  http://dl.linux-sunxi.org/A80/A80_Datasheet_Revision_1.0_0404.pdf
+
+    * Octa ARM Cortex-A7 based SoCs
+      - Allwinner A83T
+        + Not Supported
+        + Datasheet
+          http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf

+ 6 - 0
Documentation/devicetree/bindings/arm/l2cc.txt

@@ -67,6 +67,12 @@ Optional properties:
  disable if zero.
 - arm,prefetch-offset : Override prefetch offset value. Valid values are
   0-7, 15, 23, and 31.
+- arm,shared-override : The default behavior of the pl310 cache controller with
+  respect to the shareable attribute is to transform "normal memory
+  non-cacheable transactions" into "cacheable no allocate" (for reads) or
+  "write through no write allocate" (for writes).
+  On systems where this may cause DMA buffer corruption, this property must be
+  specified to indicate that such transforms are precluded.
 - prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
   (forcibly enable), property absent (retain settings set by firmware)
 - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
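A minimal sketch of how an outer-cache driver could honour the new property (the helper name and aux-control plumbing are illustrative; only the "arm,shared-override" property name and the PL310 Shared Attribute Override Enable bit, AUX_CTRL[22], are taken as given):

	/* Hedged sketch: set the PL310 Shared Attribute Override Enable bit
	 * when the DT property is present, so normal non-cacheable shared
	 * transactions are no longer converted into cacheable ones. */
	#include <linux/of.h>

	#define L310_AUX_CTRL_SHARED_OVERRIDE	(1 << 22)	/* AUX_CTRL[22] */

	static void example_l2c310_parse(const struct device_node *np, u32 *aux_val)
	{
		if (of_property_read_bool(np, "arm,shared-override"))
			*aux_val |= L310_AUX_CTRL_SHARED_OVERRIDE;
	}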

+ 2 - 0
Documentation/devicetree/bindings/arm/sunxi.txt

@@ -9,4 +9,6 @@ using one of the following compatible strings:
   allwinner,sun6i-a31
   allwinner,sun7i-a20
   allwinner,sun8i-a23
+  allwinner,sun8i-a33
+  allwinner,sun8i-h3
   allwinner,sun9i-a80

+ 1 - 0
Documentation/devicetree/bindings/memory-controllers/ti/emif.txt

@@ -8,6 +8,7 @@ of the EMIF IP and memory parts attached to it.
 Required properties:
 - compatible	: Should be of the form "ti,emif-<ip-rev>" where <ip-rev>
   is the IP revision of the specific EMIF instance.
+		  For am437x should be ti,emif-am4372.
 
 
 - phy-type	: <u32> indicating the DDR phy type. Following are the
   allowed values

+ 11 - 2
Documentation/power/swsusp.txt

@@ -410,8 +410,17 @@ Documentation/usb/persist.txt.

 Q: Can I suspend-to-disk using a swap partition under LVM?

-A: No. You can suspend successfully, but you'll not be able to
-resume. uswsusp should be able to work with LVM. See suspend.sf.net.
+A: Yes and No.  You can suspend successfully, but the kernel will not be able
+to resume on its own.  You need an initramfs that can recognize the resume
+situation, activate the logical volume containing the swap volume (but not
+touch any filesystems!), and eventually call
+
+echo -n "$major:$minor" > /sys/power/resume
+
+where $major and $minor are the respective major and minor device numbers of
+the swap volume.
+
+uswsusp works with LVM, too.  See http://suspend.sourceforge.net/
 
 
 Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were
 compiled with the similar configuration files. Anyway I found that

+ 21 - 5
MAINTAINERS

@@ -1614,6 +1614,7 @@ M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/boot/dts/vexpress*
+F:	arch/arm64/boot/dts/arm/vexpress*
 F:	arch/arm/mach-vexpress/
 F:	*/*/vexpress*
 F:	*/*/*/vexpress*
@@ -2562,19 +2563,31 @@ F:	arch/powerpc/include/uapi/asm/spu*.h
 F:	arch/powerpc/oprofile/*cell*
 F:	arch/powerpc/platforms/cell/

-CEPH DISTRIBUTED FILE SYSTEM CLIENT
+CEPH COMMON CODE (LIBCEPH)
+M:	Ilya Dryomov <idryomov@gmail.com>
 M:	"Yan, Zheng" <zyan@redhat.com>
 M:	Sage Weil <sage@redhat.com>
 L:	ceph-devel@vger.kernel.org
 W:	http://ceph.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T:	git git://github.com/ceph/ceph-client.git
 S:	Supported
-F:	Documentation/filesystems/ceph.txt
-F:	fs/ceph/
 F:	net/ceph/
 F:	include/linux/ceph/
 F:	include/linux/crush/

+CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
+M:	"Yan, Zheng" <zyan@redhat.com>
+M:	Sage Weil <sage@redhat.com>
+M:	Ilya Dryomov <idryomov@gmail.com>
+L:	ceph-devel@vger.kernel.org
+W:	http://ceph.com/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T:	git git://github.com/ceph/ceph-client.git
+S:	Supported
+F:	Documentation/filesystems/ceph.txt
+F:	fs/ceph/
+
 CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
 L:	linux-usb@vger.kernel.org
 S:	Orphan
@@ -6147,6 +6160,7 @@ L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
 F:	drivers/nvdimm/pmem.c
+F:	include/linux/pmem.h

 LINUX FOR IBM pSERIES (RS/6000)
 M:	Paul Mackerras <paulus@au.ibm.com>
@@ -6161,7 +6175,7 @@ M:	Michael Ellerman <mpe@ellerman.id.au>
 W:	http://www.penguinppc.org/
 L:	linuxppc-dev@lists.ozlabs.org
 Q:	http://patchwork.ozlabs.org/project/linuxppc-dev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
 S:	Supported
 F:	Documentation/powerpc/
 F:	arch/powerpc/
@@ -8366,10 +8380,12 @@ RADOS BLOCK DEVICE (RBD)
 M:	Ilya Dryomov <idryomov@gmail.com>
 M:	Sage Weil <sage@redhat.com>
 M:	Alex Elder <elder@kernel.org>
-M:	ceph-devel@vger.kernel.org
+L:	ceph-devel@vger.kernel.org
 W:	http://ceph.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T:	git git://github.com/ceph/ceph-client.git
 S:	Supported
+F:	Documentation/ABI/testing/sysfs-bus-rbd
 F:	drivers/block/rbd.c
 F:	drivers/block/rbd_types.h


+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma sheep

 # *DOCUMENTATION*

+ 24 - 0
arch/arm/Kconfig

@@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64
 config ARCH_HAS_BANDGAP
 	bool

+config FIX_EARLYCON_MEM
+	def_bool y if MMU
+
 config GENERIC_HWEIGHT
 	bool
 	default y
@@ -1693,6 +1696,27 @@ config HIGHMEM
 config HIGHPTE
 	bool "Allocate 2nd-level pagetables from highmem"
 	depends on HIGHMEM
+	help
+	  The VM uses one page of physical memory for each page table.
+	  For systems with a lot of processes, this can use a lot of
+	  precious low memory, eventually leading to low memory being
+	  consumed by page tables.  Setting this option will allow
+	  user-space 2nd level page tables to reside in high memory.
+
+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement privileged no-access"
+	depends on MMU && !ARM_LPAE
+	default y
+	help
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses.  This can help prevent
+	  use-after-free bugs becoming an exploitable privilege escalation
+	  by ensuring that magic values (such as LIST_POISON) will always
+	  fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-efforts implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
 
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
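To make the effect of CPU_SW_DOMAIN_PAN concrete (an illustrative sketch, not code from this series): with the option enabled, the user domain is normally set to "no access" while running in the kernel, so a stray direct dereference of a user pointer faults, while the sanctioned accessors still work because they re-enable user access around the copy.

	#include <linux/uaccess.h>

	/* "uptr" is a hypothetical pointer handed in from userspace. */
	static int example_read_user_value(int __user *uptr, int *out)
	{
		int val;

		/* val = *uptr;  <-- would fault under CPU_SW_DOMAIN_PAN */

		/* get_user()/copy_from_user() briefly grant user access
		 * (see the uaccess_save_and_enable() changes in this merge). */
		if (get_user(val, uptr))
			return -EFAULT;

		*out = val;
		return 0;
	}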

+ 1 - 1
arch/arm/Kconfig.debug

@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR

 config DEBUG_SET_MODULE_RONX
 	bool "Set loadable kernel module data as NX and text as RO"
-	depends on MODULES
+	depends on MODULES && MMU
 	---help---
 	  This option helps catch unintended modifications to loadable
 	  kernel module's text and read-only data. It also prevents execution

+ 3 - 0
arch/arm/Makefile

@@ -312,6 +312,9 @@ INSTALL_TARGETS	= zinstall uinstall install

 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)

+bootpImage uImage: zImage
+zImage: Image
+
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@


+ 4 - 0
arch/arm/boot/dts/am335x-boneblack.dts

@@ -80,3 +80,7 @@
 		status = "okay";
 	};
 };
+
+&rtc {
+	system-power-controller;
+};

+ 7 - 0
arch/arm/boot/dts/am4372.dtsi

@@ -132,6 +132,12 @@
 			};
 		};

+		emif: emif@4c000000 {
+			compatible = "ti,emif-am4372";
+			reg = <0x4c000000 0x1000000>;
+			ti,hwmods = "emif";
+		};
+
 		edma: edma@49000000 {
 			compatible = "ti,edma3";
 			ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
@@ -941,6 +947,7 @@
 				ti,hwmods = "dss_rfbi";
 				clocks = <&disp_clk>;
 				clock-names = "fck";
+				status = "disabled";
 			};
 		};


+ 4 - 0
arch/arm/boot/dts/am57xx-beagle-x15.dts

@@ -605,6 +605,10 @@
 	phy-supply = <&ldousb_reg>;
 };

+&usb2_phy2 {
+	phy-supply = <&ldousb_reg>;
+};
+
 &usb1 {
 	dr_mode = "host";
 	pinctrl-names = "default";

+ 1041 - 1
arch/arm/boot/dts/atlas7.dtsi

@@ -135,6 +135,1025 @@
 			compatible = "sirf,atlas7-ioc";
 			reg = <0x18880000 0x1000>,
 				<0x10E40000 0x1000>;
+
+			audio_ac97_pmx: audio_ac97@0 {
+				audio_ac97 {
+					groups = "audio_ac97_grp";
+					function = "audio_ac97";
+				};
+			};
+
+			audio_func_dbg_pmx: audio_func_dbg@0 {
+				audio_func_dbg {
+					groups = "audio_func_dbg_grp";
+					function = "audio_func_dbg";
+				};
+			};
+
+			audio_i2s_pmx: audio_i2s@0 {
+				audio_i2s {
+					groups = "audio_i2s_grp";
+					function = "audio_i2s";
+				};
+			};
+
+			audio_i2s_2ch_pmx: audio_i2s_2ch@0 {
+				audio_i2s_2ch {
+					groups = "audio_i2s_2ch_grp";
+					function = "audio_i2s_2ch";
+				};
+			};
+
+			audio_i2s_extclk_pmx: audio_i2s_extclk@0 {
+				audio_i2s_extclk {
+					groups = "audio_i2s_extclk_grp";
+					function = "audio_i2s_extclk";
+				};
+			};
+
+			audio_uart0_pmx: audio_uart0@0 {
+				audio_uart0 {
+					groups = "audio_uart0_grp";
+					function = "audio_uart0";
+				};
+			};
+
+			audio_uart1_pmx: audio_uart1@0 {
+				audio_uart1 {
+					groups = "audio_uart1_grp";
+					function = "audio_uart1";
+				};
+			};
+
+			audio_uart2_pmx0: audio_uart2@0 {
+				audio_uart2_0 {
+					groups = "audio_uart2_grp0";
+					function = "audio_uart2_m0";
+				};
+			};
+
+			audio_uart2_pmx1: audio_uart2@1 {
+				audio_uart2_1 {
+					groups = "audio_uart2_grp1";
+					function = "audio_uart2_m1";
+				};
+			};
+
+			c_can_trnsvr_pmx: c_can_trnsvr@0 {
+				c_can_trnsvr {
+					groups = "c_can_trnsvr_grp";
+					function = "c_can_trnsvr";
+				};
+			};
+
+			c0_can_pmx0: c0_can@0 {
+				c0_can_0 {
+					groups = "c0_can_grp0";
+					function = "c0_can_m0";
+				};
+			};
+
+			c0_can_pmx1: c0_can@1 {
+				c0_can_1 {
+					groups = "c0_can_grp1";
+					function = "c0_can_m1";
+				};
+			};
+
+			c1_can_pmx0: c1_can@0 {
+				c1_can_0 {
+					groups = "c1_can_grp0";
+					function = "c1_can_m0";
+				};
+			};
+
+			c1_can_pmx1: c1_can@1 {
+				c1_can_1 {
+					groups = "c1_can_grp1";
+					function = "c1_can_m1";
+				};
+			};
+
+			c1_can_pmx2: c1_can@2 {
+				c1_can_2 {
+					groups = "c1_can_grp2";
+					function = "c1_can_m2";
+				};
+			};
+
+			ca_audio_lpc_pmx: ca_audio_lpc@0 {
+				ca_audio_lpc {
+					groups = "ca_audio_lpc_grp";
+					function = "ca_audio_lpc";
+				};
+			};
+
+			ca_bt_lpc_pmx: ca_bt_lpc@0 {
+				ca_bt_lpc {
+					groups = "ca_bt_lpc_grp";
+					function = "ca_bt_lpc";
+				};
+			};
+
+			ca_coex_pmx: ca_coex@0 {
+				ca_coex {
+					groups = "ca_coex_grp";
+					function = "ca_coex";
+				};
+			};
+
+			ca_curator_lpc_pmx: ca_curator_lpc@0 {
+				ca_curator_lpc {
+					groups = "ca_curator_lpc_grp";
+					function = "ca_curator_lpc";
+				};
+			};
+
+			ca_pcm_debug_pmx: ca_pcm_debug@0 {
+				ca_pcm_debug {
+					groups = "ca_pcm_debug_grp";
+					function = "ca_pcm_debug";
+				};
+			};
+
+			ca_pio_pmx: ca_pio@0 {
+				ca_pio {
+					groups = "ca_pio_grp";
+					function = "ca_pio";
+				};
+			};
+
+			ca_sdio_debug_pmx: ca_sdio_debug@0 {
+				ca_sdio_debug {
+					groups = "ca_sdio_debug_grp";
+					function = "ca_sdio_debug";
+				};
+			};
+
+			ca_spi_pmx: ca_spi@0 {
+				ca_spi {
+					groups = "ca_spi_grp";
+					function = "ca_spi";
+				};
+			};
+
+			ca_trb_pmx: ca_trb@0 {
+				ca_trb {
+					groups = "ca_trb_grp";
+					function = "ca_trb";
+				};
+			};
+
+			ca_uart_debug_pmx: ca_uart_debug@0 {
+				ca_uart_debug {
+					groups = "ca_uart_debug_grp";
+					function = "ca_uart_debug";
+				};
+			};
+
+			clkc_pmx0: clkc@0 {
+				clkc_0 {
+					groups = "clkc_grp0";
+					function = "clkc_m0";
+				};
+			};
+
+			clkc_pmx1: clkc@1 {
+				clkc_1 {
+					groups = "clkc_grp1";
+					function = "clkc_m1";
+				};
+			};
+
+			gn_gnss_i2c_pmx: gn_gnss_i2c@0 {
+				gn_gnss_i2c {
+					groups = "gn_gnss_i2c_grp";
+					function = "gn_gnss_i2c";
+				};
+			};
+
+			gn_gnss_uart_nopause_pmx: gn_gnss_uart_nopause@0 {
+				gn_gnss_uart_nopause {
+					groups = "gn_gnss_uart_nopause_grp";
+					function = "gn_gnss_uart_nopause";
+				};
+			};
+
+			gn_gnss_uart_pmx: gn_gnss_uart@0 {
+				gn_gnss_uart {
+					groups = "gn_gnss_uart_grp";
+					function = "gn_gnss_uart";
+				};
+			};
+
+			gn_trg_spi_pmx0: gn_trg_spi@0 {
+				gn_trg_spi_0 {
+					groups = "gn_trg_spi_grp0";
+					function = "gn_trg_spi_m0";
+				};
+			};
+
+			gn_trg_spi_pmx1: gn_trg_spi@1 {
+				gn_trg_spi_1 {
+					groups = "gn_trg_spi_grp1";
+					function = "gn_trg_spi_m1";
+				};
+			};
+
+			cvbs_dbg_pmx: cvbs_dbg@0 {
+				cvbs_dbg {
+					groups = "cvbs_dbg_grp";
+					function = "cvbs_dbg";
+				};
+			};
+
+			cvbs_dbg_test_pmx0: cvbs_dbg_test@0 {
+				cvbs_dbg_test_0 {
+					groups = "cvbs_dbg_test_grp0";
+					function = "cvbs_dbg_test_m0";
+				};
+			};
+
+			cvbs_dbg_test_pmx1: cvbs_dbg_test@1 {
+				cvbs_dbg_test_1 {
+					groups = "cvbs_dbg_test_grp1";
+					function = "cvbs_dbg_test_m1";
+				};
+			};
+
+			cvbs_dbg_test_pmx2: cvbs_dbg_test@2 {
+				cvbs_dbg_test_2 {
+					groups = "cvbs_dbg_test_grp2";
+					function = "cvbs_dbg_test_m2";
+				};
+			};
+
+			cvbs_dbg_test_pmx3: cvbs_dbg_test@3 {
+				cvbs_dbg_test_3 {
+					groups = "cvbs_dbg_test_grp3";
+					function = "cvbs_dbg_test_m3";
+				};
+			};
+
+			cvbs_dbg_test_pmx4: cvbs_dbg_test@4 {
+				cvbs_dbg_test_4 {
+					groups = "cvbs_dbg_test_grp4";
+					function = "cvbs_dbg_test_m4";
+				};
+			};
+
+			cvbs_dbg_test_pmx5: cvbs_dbg_test@5 {
+				cvbs_dbg_test_5 {
+					groups = "cvbs_dbg_test_grp5";
+					function = "cvbs_dbg_test_m5";
+				};
+			};
+
+			cvbs_dbg_test_pmx6: cvbs_dbg_test@6 {
+				cvbs_dbg_test_6 {
+					groups = "cvbs_dbg_test_grp6";
+					function = "cvbs_dbg_test_m6";
+				};
+			};
+
+			cvbs_dbg_test_pmx7: cvbs_dbg_test@7 {
+				cvbs_dbg_test_7 {
+					groups = "cvbs_dbg_test_grp7";
+					function = "cvbs_dbg_test_m7";
+				};
+			};
+
+			cvbs_dbg_test_pmx8: cvbs_dbg_test@8 {
+				cvbs_dbg_test_8 {
+					groups = "cvbs_dbg_test_grp8";
+					function = "cvbs_dbg_test_m8";
+				};
+			};
+
+			cvbs_dbg_test_pmx9: cvbs_dbg_test@9 {
+				cvbs_dbg_test_9 {
+					groups = "cvbs_dbg_test_grp9";
+					function = "cvbs_dbg_test_m9";
+				};
+			};
+
+			cvbs_dbg_test_pmx10: cvbs_dbg_test@10 {
+				cvbs_dbg_test_10 {
+					groups = "cvbs_dbg_test_grp10";
+					function = "cvbs_dbg_test_m10";
+				};
+			};
+
+			cvbs_dbg_test_pmx11: cvbs_dbg_test@11 {
+				cvbs_dbg_test_11 {
+					groups = "cvbs_dbg_test_grp11";
+					function = "cvbs_dbg_test_m11";
+				};
+			};
+
+			cvbs_dbg_test_pmx12: cvbs_dbg_test@12 {
+				cvbs_dbg_test_12 {
+					groups = "cvbs_dbg_test_grp12";
+					function = "cvbs_dbg_test_m12";
+				};
+			};
+
+			cvbs_dbg_test_pmx13: cvbs_dbg_test@13 {
+				cvbs_dbg_test_13 {
+					groups = "cvbs_dbg_test_grp13";
+					function = "cvbs_dbg_test_m13";
+				};
+			};
+
+			cvbs_dbg_test_pmx14: cvbs_dbg_test@14 {
+				cvbs_dbg_test_14 {
+					groups = "cvbs_dbg_test_grp14";
+					function = "cvbs_dbg_test_m14";
+				};
+			};
+
+			cvbs_dbg_test_pmx15: cvbs_dbg_test@15 {
+				cvbs_dbg_test_15 {
+					groups = "cvbs_dbg_test_grp15";
+					function = "cvbs_dbg_test_m15";
+				};
+			};
+
+			gn_gnss_power_pmx: gn_gnss_power@0 {
+				gn_gnss_power {
+					groups = "gn_gnss_power_grp";
+					function = "gn_gnss_power";
+				};
+			};
+
+			gn_gnss_sw_status_pmx: gn_gnss_sw_status@0 {
+				gn_gnss_sw_status {
+					groups = "gn_gnss_sw_status_grp";
+					function = "gn_gnss_sw_status";
+				};
+			};
+
+			gn_gnss_eclk_pmx: gn_gnss_eclk@0 {
+				gn_gnss_eclk {
+					groups = "gn_gnss_eclk_grp";
+					function = "gn_gnss_eclk";
+				};
+			};
+
+			gn_gnss_irq1_pmx0: gn_gnss_irq1@0 {
+				gn_gnss_irq1_0 {
+					groups = "gn_gnss_irq1_grp0";
+					function = "gn_gnss_irq1_m0";
+				};
+			};
+
+			gn_gnss_irq2_pmx0: gn_gnss_irq2@0 {
+				gn_gnss_irq2_0 {
+					groups = "gn_gnss_irq2_grp0";
+					function = "gn_gnss_irq2_m0";
+				};
+			};
+
+			gn_gnss_tm_pmx: gn_gnss_tm@0 {
+				gn_gnss_tm {
+					groups = "gn_gnss_tm_grp";
+					function = "gn_gnss_tm";
+				};
+			};
+
+			gn_gnss_tsync_pmx: gn_gnss_tsync@0 {
+				gn_gnss_tsync {
+					groups = "gn_gnss_tsync_grp";
+					function = "gn_gnss_tsync";
+				};
+			};
+
+			gn_io_gnsssys_sw_cfg_pmx: gn_io_gnsssys_sw_cfg@0 {
+				gn_io_gnsssys_sw_cfg {
+					groups = "gn_io_gnsssys_sw_cfg_grp";
+					function = "gn_io_gnsssys_sw_cfg";
+				};
+			};
+
+			gn_trg_pmx0: gn_trg@0 {
+				gn_trg_0 {
+					groups = "gn_trg_grp0";
+					function = "gn_trg_m0";
+				};
+			};
+
+			gn_trg_pmx1: gn_trg@1 {
+				gn_trg_1 {
+					groups = "gn_trg_grp1";
+					function = "gn_trg_m1";
+				};
+			};
+
+			gn_trg_shutdown_pmx0: gn_trg_shutdown@0 {
+				gn_trg_shutdown_0 {
+					groups = "gn_trg_shutdown_grp0";
+					function = "gn_trg_shutdown_m0";
+				};
+			};
+
+			gn_trg_shutdown_pmx1: gn_trg_shutdown@1 {
+				gn_trg_shutdown_1 {
+					groups = "gn_trg_shutdown_grp1";
+					function = "gn_trg_shutdown_m1";
+				};
+			};
+
+			gn_trg_shutdown_pmx2: gn_trg_shutdown@2 {
+				gn_trg_shutdown_2 {
+					groups = "gn_trg_shutdown_grp2";
+					function = "gn_trg_shutdown_m2";
+				};
+			};
+
+			gn_trg_shutdown_pmx3: gn_trg_shutdown@3 {
+				gn_trg_shutdown_3 {
+					groups = "gn_trg_shutdown_grp3";
+					function = "gn_trg_shutdown_m3";
+				};
+			};
+
+			i2c0_pmx: i2c0@0 {
+				i2c0 {
+					groups = "i2c0_grp";
+					function = "i2c0";
+				};
+			};
+
+			i2c1_pmx: i2c1@0 {
+				i2c1 {
+					groups = "i2c1_grp";
+					function = "i2c1";
+				};
+			};
+
+			jtag_pmx0: jtag@0 {
+				jtag_0 {
+					groups = "jtag_grp0";
+					function = "jtag_m0";
+				};
+			};
+
+			ks_kas_spi_pmx0: ks_kas_spi@0 {
+				ks_kas_spi_0 {
+					groups = "ks_kas_spi_grp0";
+					function = "ks_kas_spi_m0";
+				};
+			};
+
+			ld_ldd_pmx: ld_ldd@0 {
+				ld_ldd {
+					groups = "ld_ldd_grp";
+					function = "ld_ldd";
+				};
+			};
+
+			ld_ldd_16bit_pmx: ld_ldd_16bit@0 {
+				ld_ldd_16bit {
+					groups = "ld_ldd_16bit_grp";
+					function = "ld_ldd_16bit";
+				};
+			};
+
+			ld_ldd_fck_pmx: ld_ldd_fck@0 {
+				ld_ldd_fck {
+					groups = "ld_ldd_fck_grp";
+					function = "ld_ldd_fck";
+				};
+			};
+
+			ld_ldd_lck_pmx: ld_ldd_lck@0 {
+				ld_ldd_lck {
+					groups = "ld_ldd_lck_grp";
+					function = "ld_ldd_lck";
+				};
+			};
+
+			lr_lcdrom_pmx: lr_lcdrom@0 {
+				lr_lcdrom {
+					groups = "lr_lcdrom_grp";
+					function = "lr_lcdrom";
+				};
+			};
+
+			lvds_analog_pmx: lvds_analog@0 {
+				lvds_analog {
+					groups = "lvds_analog_grp";
+					function = "lvds_analog";
+				};
+			};
+
+			nd_df_pmx: nd_df@0 {
+				nd_df {
+					groups = "nd_df_grp";
+					function = "nd_df";
+				};
+			};
+
+			nd_df_nowp_pmx: nd_df_nowp@0 {
+				nd_df_nowp {
+					groups = "nd_df_nowp_grp";
+					function = "nd_df_nowp";
+				};
+			};
+
+			ps_pmx: ps@0 {
+				ps {
+					groups = "ps_grp";
+					function = "ps";
+				};
+			};
+
+			pwc_core_on_pmx: pwc_core_on@0 {
+				pwc_core_on {
+					groups = "pwc_core_on_grp";
+					function = "pwc_core_on";
+				};
+			};
+
+			pwc_ext_on_pmx: pwc_ext_on@0 {
+				pwc_ext_on {
+					groups = "pwc_ext_on_grp";
+					function = "pwc_ext_on";
+				};
+			};
+
+			pwc_gpio3_clk_pmx: pwc_gpio3_clk@0 {
+				pwc_gpio3_clk {
+					groups = "pwc_gpio3_clk_grp";
+					function = "pwc_gpio3_clk";
+				};
+			};
+
+			pwc_io_on_pmx: pwc_io_on@0 {
+				pwc_io_on {
+					groups = "pwc_io_on_grp";
+					function = "pwc_io_on";
+				};
+			};
+
+			pwc_lowbatt_b_pmx0: pwc_lowbatt_b@0 {
+				pwc_lowbatt_b_0 {
+					groups = "pwc_lowbatt_b_grp0";
+					function = "pwc_lowbatt_b_m0";
+				};
+			};
+
+			pwc_mem_on_pmx: pwc_mem_on@0 {
+				pwc_mem_on {
+					groups = "pwc_mem_on_grp";
+					function = "pwc_mem_on";
+				};
+			};
+
+			pwc_on_key_b_pmx0: pwc_on_key_b@0 {
+				pwc_on_key_b_0 {
+					groups = "pwc_on_key_b_grp0";
+					function = "pwc_on_key_b_m0";
+				};
+			};
+
+			pwc_wakeup_src0_pmx: pwc_wakeup_src0@0 {
+				pwc_wakeup_src0 {
+					groups = "pwc_wakeup_src0_grp";
+					function = "pwc_wakeup_src0";
+				};
+			};
+
+			pwc_wakeup_src1_pmx: pwc_wakeup_src1@0 {
+				pwc_wakeup_src1 {
+					groups = "pwc_wakeup_src1_grp";
+					function = "pwc_wakeup_src1";
+				};
+			};
+
+			pwc_wakeup_src2_pmx: pwc_wakeup_src2@0 {
+				pwc_wakeup_src2 {
+					groups = "pwc_wakeup_src2_grp";
+					function = "pwc_wakeup_src2";
+				};
+			};
+
+			pwc_wakeup_src3_pmx: pwc_wakeup_src3@0 {
+				pwc_wakeup_src3 {
+					groups = "pwc_wakeup_src3_grp";
+					function = "pwc_wakeup_src3";
+				};
+			};
+
+			pw_cko0_pmx0: pw_cko0@0 {
+				pw_cko0_0 {
+					groups = "pw_cko0_grp0";
+					function = "pw_cko0_m0";
+				};
+			};
+
+			pw_cko0_pmx1: pw_cko0@1 {
+				pw_cko0_1 {
+					groups = "pw_cko0_grp1";
+					function = "pw_cko0_m1";
+				};
+			};
+
+			pw_cko0_pmx2: pw_cko0@2 {
+				pw_cko0_2 {
+					groups = "pw_cko0_grp2";
+					function = "pw_cko0_m2";
+				};
+			};
+
+			pw_cko1_pmx0: pw_cko1@0 {
+				pw_cko1_0 {
+					groups = "pw_cko1_grp0";
+					function = "pw_cko1_m0";
+				};
+			};
+
+			pw_cko1_pmx1: pw_cko1@1 {
+				pw_cko1_1 {
+					groups = "pw_cko1_grp1";
+					function = "pw_cko1_m1";
+				};
+			};
+
+			pw_i2s01_clk_pmx0: pw_i2s01_clk@0 {
+				pw_i2s01_clk_0 {
+					groups = "pw_i2s01_clk_grp0";
+					function = "pw_i2s01_clk_m0";
+				};
+			};
+
+			pw_i2s01_clk_pmx1: pw_i2s01_clk@1 {
+				pw_i2s01_clk_1 {
+					groups = "pw_i2s01_clk_grp1";
+					function = "pw_i2s01_clk_m1";
+				};
+			};
+
+			pw_pwm0_pmx: pw_pwm0@0 {
+				pw_pwm0 {
+					groups = "pw_pwm0_grp";
+					function = "pw_pwm0";
+				};
+			};
+
+			pw_pwm1_pmx: pw_pwm1@0 {
+				pw_pwm1 {
+					groups = "pw_pwm1_grp";
+					function = "pw_pwm1";
+				};
+			};
+
+			pw_pwm2_pmx0: pw_pwm2@0 {
+				pw_pwm2_0 {
+					groups = "pw_pwm2_grp0";
+					function = "pw_pwm2_m0";
+				};
+			};
+
+			pw_pwm2_pmx1: pw_pwm2@1 {
+				pw_pwm2_1 {
+					groups = "pw_pwm2_grp1";
+					function = "pw_pwm2_m1";
+				};
+			};
+
+			pw_pwm3_pmx0: pw_pwm3@0 {
+				pw_pwm3_0 {
+					groups = "pw_pwm3_grp0";
+					function = "pw_pwm3_m0";
+				};
+			};
+
+			pw_pwm3_pmx1: pw_pwm3@1 {
+				pw_pwm3_1 {
+					groups = "pw_pwm3_grp1";
+					function = "pw_pwm3_m1";
+				};
+			};
+
+			pw_pwm_cpu_vol_pmx0: pw_pwm_cpu_vol@0 {
+				pw_pwm_cpu_vol_0 {
+					groups = "pw_pwm_cpu_vol_grp0";
+					function = "pw_pwm_cpu_vol_m0";
+				};
+			};
+
+			pw_pwm_cpu_vol_pmx1: pw_pwm_cpu_vol@1 {
+				pw_pwm_cpu_vol_1 {
+					groups = "pw_pwm_cpu_vol_grp1";
+					function = "pw_pwm_cpu_vol_m1";
+				};
+			};
+
+			pw_backlight_pmx0: pw_backlight@0 {
+				pw_backlight_0 {
+					groups = "pw_backlight_grp0";
+					function = "pw_backlight_m0";
+				};
+			};
+
+			pw_backlight_pmx1: pw_backlight@1 {
+				pw_backlight_1 {
+					groups = "pw_backlight_grp1";
+					function = "pw_backlight_m1";
+				};
+			};
+
+			rg_eth_mac_pmx: rg_eth_mac@0 {
+				rg_eth_mac {
+					groups = "rg_eth_mac_grp";
+					function = "rg_eth_mac";
+				};
+			};
+
+			rg_gmac_phy_intr_n_pmx: rg_gmac_phy_intr_n@0 {
+				rg_gmac_phy_intr_n {
+					groups = "rg_gmac_phy_intr_n_grp";
+					function = "rg_gmac_phy_intr_n";
+				};
+			};
+
+			rg_rgmii_mac_pmx: rg_rgmii_mac@0 {
+				rg_rgmii_mac {
+					groups = "rg_rgmii_mac_grp";
+					function = "rg_rgmii_mac";
+				};
+			};
+
+			rg_rgmii_phy_ref_clk_pmx0: rg_rgmii_phy_ref_clk@0 {
+				rg_rgmii_phy_ref_clk_0 {
+					groups =
+						"rg_rgmii_phy_ref_clk_grp0";
+					function =
+						"rg_rgmii_phy_ref_clk_m0";
+				};
+			};
+
+			rg_rgmii_phy_ref_clk_pmx1: rg_rgmii_phy_ref_clk@1 {
+				rg_rgmii_phy_ref_clk_1 {
+					groups =
+						"rg_rgmii_phy_ref_clk_grp1";
+					function =
+						"rg_rgmii_phy_ref_clk_m1";
+				};
+			};
+
+			sd0_pmx: sd0@0 {
+				sd0 {
+					groups = "sd0_grp";
+					function = "sd0";
+				};
+			};
+
+			sd0_4bit_pmx: sd0_4bit@0 {
+				sd0_4bit {
+					groups = "sd0_4bit_grp";
+					function = "sd0_4bit";
+				};
+			};
+
+			sd1_pmx: sd1@0 {
+				sd1 {
+					groups = "sd1_grp";
+					function = "sd1";
+				};
+			};
+
+			sd1_4bit_pmx0: sd1_4bit@0 {
+				sd1_4bit_0 {
+					groups = "sd1_4bit_grp0";
+					function = "sd1_4bit_m0";
+				};
+			};
+
+			sd1_4bit_pmx1: sd1_4bit@1 {
+				sd1_4bit_1 {
+					groups = "sd1_4bit_grp1";
+					function = "sd1_4bit_m1";
+				};
+			};
+
+			sd2_pmx0: sd2@0 {
+				sd2_0 {
+					groups = "sd2_grp0";
+					function = "sd2_m0";
+				};
+			};
+
+			sd2_no_cdb_pmx0: sd2_no_cdb@0 {
+				sd2_no_cdb_0 {
+					groups = "sd2_no_cdb_grp0";
+					function = "sd2_no_cdb_m0";
+				};
+			};
+
+			sd3_pmx: sd3@0 {
+				sd3 {
+					groups = "sd3_grp";
+					function = "sd3";
+				};
+			};
+
+			sd5_pmx: sd5@0 {
+				sd5 {
+					groups = "sd5_grp";
+					function = "sd5";
+				};
+			};
+
+			sd6_pmx0: sd6@0 {
+				sd6_0 {
+					groups = "sd6_grp0";
+					function = "sd6_m0";
+				};
+			};
+
+			sd6_pmx1: sd6@1 {
+				sd6_1 {
+					groups = "sd6_grp1";
+					function = "sd6_m1";
+				};
+			};
+
+			sp0_ext_ldo_on_pmx: sp0_ext_ldo_on@0 {
+				sp0_ext_ldo_on {
+					groups = "sp0_ext_ldo_on_grp";
+					function = "sp0_ext_ldo_on";
+				};
+			};
+
+			sp0_qspi_pmx: sp0_qspi@0 {
+				sp0_qspi {
+					groups = "sp0_qspi_grp";
+					function = "sp0_qspi";
+				};
+			};
+
+			sp1_spi_pmx: sp1_spi@0 {
+				sp1_spi {
+					groups = "sp1_spi_grp";
+					function = "sp1_spi";
+				};
+			};
+
+			tpiu_trace_pmx: tpiu_trace@0 {
+				tpiu_trace {
+					groups = "tpiu_trace_grp";
+					function = "tpiu_trace";
+				};
+			};
+
+			uart0_pmx: uart0@0 {
+				uart0 {
+					groups = "uart0_grp";
+					function = "uart0";
+				};
+			};
+
+			uart0_nopause_pmx: uart0_nopause@0 {
+				uart0_nopause {
+					groups = "uart0_nopause_grp";
+					function = "uart0_nopause";
+				};
+			};
+
+			uart1_pmx: uart1@0 {
+				uart1 {
+					groups = "uart1_grp";
+					function = "uart1";
+				};
+			};
+
+			uart2_pmx: uart2@0 {
+				uart2 {
+					groups = "uart2_grp";
+					function = "uart2";
+				};
+			};
+
+			uart3_pmx0: uart3@0 {
+				uart3_0 {
+					groups = "uart3_grp0";
+					function = "uart3_m0";
+				};
+			};
+
+			uart3_pmx1: uart3@1 {
+				uart3_1 {
+					groups = "uart3_grp1";
+					function = "uart3_m1";
+				};
+			};
+
+			uart3_pmx2: uart3@2 {
+				uart3_2 {
+					groups = "uart3_grp2";
+					function = "uart3_m2";
+				};
+			};
+
+			uart3_pmx3: uart3@3 {
+				uart3_3 {
+					groups = "uart3_grp3";
+					function = "uart3_m3";
+				};
+			};
+
+			uart3_nopause_pmx0: uart3_nopause@0 {
+				uart3_nopause_0 {
+					groups = "uart3_nopause_grp0";
+					function = "uart3_nopause_m0";
+				};
+			};
+
+			uart3_nopause_pmx1: uart3_nopause@1 {
+				uart3_nopause_1 {
+					groups = "uart3_nopause_grp1";
+					function = "uart3_nopause_m1";
+				};
+			};
+
+			uart4_pmx0: uart4@0 {
+				uart4_0 {
+					groups = "uart4_grp0";
+					function = "uart4_m0";
+				};
+			};
+
+			uart4_pmx1: uart4@1 {
+				uart4_1 {
+					groups = "uart4_grp1";
+					function = "uart4_m1";
+				};
+			};
+
+			uart4_pmx2: uart4@2 {
+				uart4_2 {
+					groups = "uart4_grp2";
+					function = "uart4_m2";
+				};
+			};
+
+			uart4_nopause_pmx: uart4_nopause@0 {
+				uart4_nopause {
+					groups = "uart4_nopause_grp";
+					function = "uart4_nopause";
+				};
+			};
+
+			usb0_drvvbus_pmx: usb0_drvvbus@0 {
+				usb0_drvvbus {
+					groups = "usb0_drvvbus_grp";
+					function = "usb0_drvvbus";
+				};
+			};
+
+			usb1_drvvbus_pmx: usb1_drvvbus@0 {
+				usb1_drvvbus {
+					groups = "usb1_drvvbus_grp";
+					function = "usb1_drvvbus";
+				};
+			};
+
+			visbus_dout_pmx: visbus_dout@0 {
+				visbus_dout {
+					groups = "visbus_dout_grp";
+					function = "visbus_dout";
+				};
+			};
+
+			vi_vip1_pmx: vi_vip1@0 {
+				vi_vip1 {
+					groups = "vi_vip1_grp";
+					function = "vi_vip1";
+				};
+			};
+
+			vi_vip1_ext_pmx: vi_vip1_ext@0 {
+				vi_vip1_ext {
+					groups = "vi_vip1_ext_grp";
+					function = "vi_vip1_ext";
+				};
+			};
+
+			vi_vip1_low8bit_pmx: vi_vip1_low8bit@0 {
+				vi_vip1_low8bit {
+					groups = "vi_vip1_low8bit_grp";
+					function = "vi_vip1_low8bit";
+				};
+			};
+
+			vi_vip1_high8bit_pmx: vi_vip1_high8bit@0 {
+				vi_vip1_high8bit {
+					groups = "vi_vip1_high8bit_grp";
+					function = "vi_vip1_high8bit";
+				};
+			};
 		};
 		};

 		pmipc {
@@ -356,6 +1375,12 @@
 				clock-names = "gpio0_io";
 				gpio-controller;
 				interrupt-controller;
+				gpio-banks = <2>;
+				gpio-ranges = <&pinctrl 0 0 0>,
+						<&pinctrl 32 0 0>;
+				gpio-ranges-group-names = "lvds_gpio_grp",
+							"uart_nand_gpio_grp";
 			};
 			};

 			nand@17050000 {
@@ -461,11 +1486,22 @@
 				#interrupt-cells = <2>;
 				compatible = "sirf,atlas7-gpio";
 				reg = <0x13300000 0x1000>;
+				interrupts = <0 43 0>, <0 44 0>,
+						<0 45 0>, <0 46 0>;
 				clocks = <&car 84>;
 				clocks = <&car 84>;
 				clock-names = "gpio1_io";
 				gpio-controller;
 				interrupt-controller;
+				gpio-banks = <4>;
+				gpio-ranges = <&pinctrl 0 0 0>,
+						<&pinctrl 32 0 0>,
+						<&pinctrl 64 0 0>,
+						<&pinctrl 96 0 0>;
+				gpio-ranges-group-names = "gnss_gpio_grp",
+							"lcd_vip_gpio_grp",
+							"sdio_i2s_gpio_grp",
+							"sp_rgmii_gpio_grp";
 			};
 			};

 			sd2: sdhci@14200000 {
@@ -744,6 +1780,10 @@
 				interrupts = <0 47 0>;
 				gpio-controller;
 				interrupt-controller;
+				gpio-banks = <1>;
+				gpio-ranges = <&pinctrl 0 0 0>;
+				gpio-ranges-group-names = "rtc_gpio_grp";
 			};

 			rtc-iobg@18840000 {

+ 23 - 2
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts

@@ -150,6 +150,16 @@
 			interface-type = "ace";
 			reg = <0x5000 0x1000>;
 		};
+
+		pmu@9000 {
+			 compatible = "arm,cci-400-pmu,r0";
+			 reg = <0x9000 0x5000>;
+			 interrupts = <0 105 4>,
+				      <0 101 4>,
+				      <0 102 4>,
+				      <0 103 4>,
+				      <0 104 4>;
+		};
 	};

 	memory-controller@7ffd0000 {
@@ -187,11 +197,22 @@
 			     <1 10 0xf08>;
 	};

-	pmu {
+	pmu_a15 {
 		compatible = "arm,cortex-a15-pmu";
 		interrupts = <0 68 4>,
 			     <0 69 4>;
-		interrupt-affinity = <&cpu0>, <&cpu1>;
+		interrupt-affinity = <&cpu0>,
+				     <&cpu1>;
+	};
+
+	pmu_a7 {
+		compatible = "arm,cortex-a7-pmu";
+		interrupts = <0 128 4>,
+			     <0 129 4>,
+			     <0 130 4>;
+		interrupt-affinity = <&cpu2>,
+				     <&cpu3>,
+				     <&cpu4>;
 	};

 	oscclk6a: oscclk6a {

+ 4 - 8
arch/arm/common/mcpm_platsmp.c

@@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu)
 	return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
 }

-static int mcpm_cpu_disable(unsigned int cpu)
+static bool mcpm_cpu_can_disable(unsigned int cpu)
 {
-	/*
-	 * We assume all CPUs may be shut down.
-	 * This would be the hook to use for eventual Secure
-	 * OS migration requests as described in the PSCI spec.
-	 */
-	return 0;
+	/* We assume all CPUs may be shut down. */
+	return true;
 }

 static void mcpm_cpu_die(unsigned int cpu)
@@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
 	.smp_secondary_init	= mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_kill		= mcpm_cpu_kill,
-	.cpu_disable		= mcpm_cpu_disable,
+	.cpu_can_disable	= mcpm_cpu_can_disable,
 	.cpu_die		= mcpm_cpu_die,
 #endif
 };
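The hook changes meaning as well as name: cpu_disable() returned 0 to allow a hot-unplug, whereas cpu_can_disable() simply answers yes or no. A self-contained sketch of the caller side (types reduced to the minimum; the real consumer is the arch/arm/kernel/smp.c change listed in this merge, which this does not quote):

	#include <stdbool.h>

	struct example_smp_operations {
		bool (*cpu_can_disable)(unsigned int cpu);
	};

	static bool example_platform_can_hotplug_cpu(const struct example_smp_operations *ops,
						     unsigned int cpu)
	{
		if (ops->cpu_can_disable)
			return ops->cpu_can_disable(cpu);
		return true;	/* default: any CPU may be taken down */
	}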

+ 0 - 1
arch/arm/configs/multi_v7_defconfig

@@ -353,7 +353,6 @@ CONFIG_POWER_RESET_AS3722=y
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_GPIO_RESTART=y
 CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_SUN6I=y
 CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y

+ 5 - 1
arch/arm/configs/sunxi_defconfig

@@ -2,6 +2,7 @@ CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_PERF_EVENTS=y
+CONFIG_MODULES=y
 CONFIG_ARCH_SUNXI=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=8
@@ -77,7 +78,6 @@ CONFIG_SPI_SUN6I=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_SUN6I=y
 CONFIG_THERMAL=y
 CONFIG_CPU_THERMAL=y
 CONFIG_WATCHDOG=y
@@ -87,6 +87,10 @@ CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_GPIO=y
+CONFIG_FB=y
+CONFIG_FB_SIMPLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y

+ 0 - 1
arch/arm/include/asm/Kbuild

@@ -12,7 +12,6 @@ generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
-generic-y += mcs_spinlock.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h

+ 47 - 0
arch/arm/include/asm/assembler.h

@@ -449,6 +449,53 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm

+	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	mrc	p15, 0, \tmp, c3, c0, 0
+	str	\tmp, [sp, #S_FRAME_SIZE]
+#endif
+	.endm
+
+	.macro	uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	ldr	r0, [sp, #S_FRAME_SIZE]
+	mcr	p15, 0, r0, c3, c0, 0
+#endif
+	.endm
+
+	.macro	uaccess_save_and_disable, tmp
+	uaccess_save \tmp
+	uaccess_disable \tmp
+	.endm
+
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6

+ 10 - 3
arch/arm/include/asm/barrier.h

@@ -2,7 +2,6 @@
 #define __ASM_BARRIER_H

 #ifndef __ASSEMBLY__
-#include <asm/outercache.h>

 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

@@ -37,12 +36,20 @@
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif

+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
 #ifdef CONFIG_ARCH_HAS_BARRIERS
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb()		do { dsb(); outer_sync(); } while (0)
+#define mb()		__arm_heavy_mb()
 #define rmb()		dsb()
-#define wmb()		do { dsb(st); outer_sync(); } while (0)
+#define wmb()		__arm_heavy_mb(st)
 #define dma_rmb()	dmb(osh)
 #define dma_wmb()	dmb(oshst)
 #else
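mb() and wmb() now funnel through arm_heavy_mb(), which can invoke a SoC-specific hook rather than an unconditional outer_sync(). A hedged sketch of how a platform would install that hook (the OMAP4 barrier code added elsewhere in this merge is the real user; the register access below is purely illustrative):

	#include <linux/io.h>
	#include <asm/barrier.h>

	static void __iomem *example_drain_reg;	/* assumed mapping of a write-drain register */

	static void example_soc_mb(void)
	{
		/* Force earlier writes out of the interconnect write buffers
		 * (illustrative register poke, not a real OMAP sequence). */
		writel_relaxed(1, example_drain_reg);
		readl_relaxed(example_drain_reg);
	}

	static void example_barrier_init(void)
	{
		soc_mb = example_soc_mb;	/* picked up by arm_heavy_mb() */
	}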

+ 12 - 12
arch/arm/include/asm/bitops.h

@@ -35,9 +35,9 @@
 static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);

-	p += bit >> 5;
+	p += BIT_WORD(bit);

 	raw_local_irq_save(flags);
 	*p |= mask;
@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
 static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);

-	p += bit >> 5;
+	p += BIT_WORD(bit);

 	raw_local_irq_save(flags);
 	*p &= ~mask;
@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
 static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);

-	p += bit >> 5;
+	p += BIT_WORD(bit);

 	raw_local_irq_save(flags);
 	*p ^= mask;
@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);

-	p += bit >> 5;
+	p += BIT_WORD(bit);

 	raw_local_irq_save(flags);
 	res = *p;
@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);

-	p += bit >> 5;
+	p += BIT_WORD(bit);

 	raw_local_irq_save(flags);
 	res = *p;
@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);

-	p += bit >> 5;
+	p += BIT_WORD(bit);

 	raw_local_irq_save(flags);
 	res = *p;
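The conversions are purely cosmetic on 32-bit ARM: with BITS_PER_LONG == 32, BIT_MASK(bit) expands to 1UL << (bit % 32), i.e. the old 1UL << (bit & 31), and BIT_WORD(bit) expands to bit / 32, i.e. the old bit >> 5. A quick illustrative self-check:

	#include <linux/bitops.h>
	#include <linux/bug.h>

	static void example_bit_macro_selfcheck(unsigned int bit)
	{
		BUILD_BUG_ON(BITS_PER_LONG != 32);	/* 32-bit ARM assumption */
		WARN_ON(BIT_MASK(bit) != (1UL << (bit & 31)));
		WARN_ON(BIT_WORD(bit) != (bit >> 5));
	}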

+ 17 - 4
arch/arm/include/asm/cacheflush.h

@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_map_area			cpu_cache.dma_map_area
-#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range

 #else
@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);

 #endif
@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { }
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);

+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side.  This is used when calling secure
+ * firmware where the secure firmware does not ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+	phys_addr_t phys = __pa(addr);
+
+	__cpuc_flush_dcache_area((void *)addr, size);
+	outer_flush_range(phys, phys + size);
+}
+
 #endif
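A hedged usage sketch for the new secure_flush_area() helper: flush a parameter block out to the point of coherency before handing its physical address to secure firmware that does not maintain coherency itself. The firmware wrapper and parameter layout are hypothetical.

	#include <linux/types.h>
	#include <asm/cacheflush.h>

	struct example_fw_params {
		u32 cmd;
		u32 arg;
	};

	extern void example_invoke_smc(phys_addr_t pa);	/* hypothetical SMC wrapper */

	static void example_call_secure_fw(struct example_fw_params *params)
	{
		secure_flush_area(params, sizeof(*params));
		example_invoke_smc(__pa(params));
	}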

+ 1 - 1
arch/arm/include/asm/dma-mapping.h

@@ -14,7 +14,7 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>

-#define DMA_ERROR_CODE	(~0)
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;


+ 43 - 10
arch/arm/include/asm/domain.h

@@ -34,15 +34,14 @@
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL	0
-#define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
 #define DOMAIN_IO	2
 #else
 #define DOMAIN_KERNEL	2
-#define DOMAIN_TABLE	2
 #define DOMAIN_USER	1
 #define DOMAIN_IO	0
 #endif
+#define DOMAIN_VECTORS	3

 /*
  * Domain types
@@ -55,11 +54,46 @@
 #define DOMAIN_MANAGER	1
 #endif

-#define domain_val(dom,type)	((type) << (2*(dom)))
+#define domain_mask(dom)	((3) << (2 * (dom)))
+#define domain_val(dom,type)	((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 
 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+	unsigned int domain;
+
+	asm(
+	"mrc	p15, 0, %0, c3, c0	@ get domain"
+	 : "=r" (domain));
+
+	return domain;
+}
+
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
@@ -68,17 +102,16 @@ static inline void set_domain(unsigned val)
 	isb();
 }

+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
 	do {							\
-	struct thread_info *thread = current_thread_info();	\
-	unsigned int domain = thread->cpu_domain;		\
-	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
-	thread->cpu_domain = domain | domain_val(dom, type);	\
-	set_domain(thread->cpu_domain);				\
+		unsigned int domain = get_domain();		\
+		domain &= ~domain_mask(dom);			\
+		domain = domain | domain_val(dom, type);	\
+		set_domain(domain);				\
 	} while (0)

 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
 #endif
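The arithmetic behind the new constants is worth spelling out (values follow directly from the definitions above, with DOMAIN_NOACCESS == 0 and DOMAIN_CLIENT == 1): each domain owns a two-bit field in the DACR, and the enable/disable pair differ only in the DOMAIN_USER field.

	/* Illustrative only: the two DACR values used for PAN differ in a
	 * single field, which is what the uaccess enable/disable paths flip.
	 *
	 *   domain_mask(DOMAIN_USER)               == 3 << (2 * 1) == 0x0c
	 *   domain_val(DOMAIN_USER, DOMAIN_CLIENT) == 1 << (2 * 1) == 0x04
	 *
	 * so DACR_UACCESS_ENABLE ^ DACR_UACCESS_DISABLE == 0x04.
	 */
	static inline unsigned int example_dacr_uaccess_delta(void)
	{
		return DACR_UACCESS_ENABLE ^ DACR_UACCESS_DISABLE;	/* 0x04 */
	}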
 
 

+ 14 - 1
arch/arm/include/asm/fixmap.h

@@ -6,9 +6,13 @@
 #define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)

 #include <asm/kmap_types.h>
+#include <asm/pgtable.h>

 enum fixed_addresses {
-	FIX_KMAP_BEGIN,
+	FIX_EARLYCON_MEM_BASE,
+	__end_of_permanent_fixed_addresses,
+
+	FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
 	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,

 	/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@ enum fixed_addresses {
 	__end_of_fixed_addresses
 };

+#define FIXMAP_PAGE_COMMON	(L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL	(FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO		(FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE	FIXMAP_PAGE_IO
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);

 #include <asm-generic/fixmap.h>
 

+ 17 - 2
arch/arm/include/asm/futex.h

@@ -22,8 +22,11 @@
 #ifdef CONFIG_SMP

 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags;				\
 	smp_mb();						\
 	prefetchw(uaddr);					\
+	__ua_flags = uaccess_save_and_enable();			\
 	__asm__ __volatile__(					\
 	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
@@ -34,12 +37,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})

 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret;
 	u32 val;

@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	smp_mb();
 	smp_mb();
 	/* Prefetching cannot fault */
 	/* Prefetching cannot fault */
 	prefetchw(uaddr);
 	prefetchw(uaddr);
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
 	"	teq	%1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "=&r" (ret), "=&r" (val)
 	: "=&r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	smp_mb();
 	smp_mb();
 
 
 	*uval = val;
 	*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <asm/domain.h>
 #include <asm/domain.h>
 
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags = uaccess_save_and_enable();	\
 	__asm__ __volatile__(					\
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
 	"	" insn "\n"					\
 	"	" insn "\n"					\
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	__futex_atomic_ex_table("%5")				\
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 
 static inline int
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 			      u32 oldval, u32 newval)
 {
 {
+	unsigned int __ua_flags;
 	int ret = 0;
 	int ret = 0;
 	u32 val;
 	u32 val;
 
 
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 		return -EFAULT;
 
 
 	preempt_disable();
 	preempt_disable();
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
 	"	teq	%1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "+r" (ret), "=&r" (val)
 	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 
 
 	*uval = val;
 	*uval = val;
 	preempt_enable();
 	preempt_enable();

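Stripped of the exclusive-monitor retry loop, the barriers and the fault fixups, the cmpxchg sequence above has simple semantics; the following non-atomic C sketch is only a reading aid for the ldrex/strex block, not a replacement for it:

static int cmpxchg_user_word_sketch(u32 *uval, u32 *uaddr,
				    u32 oldval, u32 newval)
{
	u32 val = *uaddr;		/* ldrex                                  */

	if (val == oldval)		/* teq                                    */
		*uaddr = newval;	/* strexeq, retried until it succeeds     */

	*uval = val;			/* report the value that was found        */
	return 0;			/* -EFAULT instead if the access faulted  */
}
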
+ 0 - 2
arch/arm/include/asm/glue-cache.h

@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 #endif
 #endif
 
 

+ 58 - 17
arch/arm/include/asm/io.h

@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
  * The _caller variety takes a __builtin_return_address(0) value for
  * The _caller variety takes a __builtin_return_address(0) value for
  * /proc/vmalloc to use - and should only be used in non-inline functions.
  * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
  */
-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
-	size_t, unsigned int, void *);
 extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
 extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
 	void *);
 	void *);
-
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
 extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iounmap(volatile void __iomem *addr);
-extern void __arm_iounmap(volatile void __iomem *addr);
 
 
 extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 	unsigned int, void *);
 	unsigned int, void *);
@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 static inline void memset_io(volatile void __iomem *dst, unsigned c,
 static inline void memset_io(volatile void __iomem *dst, unsigned c,
 	size_t count)
 	size_t count)
 {
 {
-	memset((void __force *)dst, c, count);
+	extern void mmioset(void *, unsigned int, size_t);
+	mmioset((void __force *)dst, c, count);
 }
 }
 #define memset_io(dst,c,count) memset_io(dst,c,count)
 #define memset_io(dst,c,count) memset_io(dst,c,count)
 
 
 static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
 static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
 	size_t count)
 	size_t count)
 {
 {
-	memcpy(to, (const void __force *)from, count);
+	extern void mmiocpy(void *, const void *, size_t);
+	mmiocpy(to, (const void __force *)from, count);
 }
 }
 #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
 #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
 
 
 static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 	size_t count)
 	size_t count)
 {
 {
-	memcpy((void __force *)to, from, count);
+	extern void mmiocpy(void *, const void *, size_t);
+	mmiocpy((void __force *)to, from, count);
 }
 }
 #define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
 #define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
 
 
@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 #endif	/* readl */
 #endif	/* readl */
 
 
 /*
 /*
- * ioremap and friends.
+ * ioremap() and friends.
+ *
+ * ioremap() takes a resource address, and size.  Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
+ *
+ * Function		Memory type	Cacheability	Cache hint
+ * ioremap()		Device		n/a		n/a
+ * ioremap_nocache()	Device		n/a		n/a
+ * ioremap_cache()	Normal		Writeback	Read allocate
+ * ioremap_wc()		Normal		Non-cacheable	n/a
+ * ioremap_wt()		Normal		Non-cacheable	n/a
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
  *
  *
- * ioremap takes a PCI memory address, as specified in
- * Documentation/io-mapping.txt.
+ * ioremap_nocache() is the same as ioremap() as there are too many device
+ * drivers using this for device registers, and documentation which tells
+ * people to use it for such for this to be any different.  This is not a
+ * safe fallback for memory-like mappings, or memory regions where the
+ * compiler may generate unaligned accesses - eg, via inlining its own
+ * memcpy.
  *
  *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ *   instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
  */
  */
-#define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
-#define ioremap_wt(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
-#define iounmap				__arm_iounmap
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
+#define ioremap_nocache ioremap
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
+
+void iounmap(volatile void __iomem *iomem_cookie);
+#define iounmap iounmap
 
 
 /*
 /*
  * io{read,write}{16,32}be() macros
  * io{read,write}{16,32}be() macros

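The table above is easier to apply with a concrete example. A minimal driver-style sketch using only the prototypes declared here; the physical address and register offsets are invented for illustration:

#include <linux/io.h>
#include <linux/sizes.h>

static int example_probe_sketch(void)
{
	void __iomem *regs = ioremap(0x48020000, SZ_4K);	/* device-type mapping */
	u32 rev;

	if (!regs)
		return -ENOMEM;

	rev = readl(regs + 0x00);	/* reads are not speculated, merged or repeated */
	writel(0x1, regs + 0x10);	/* writes may still be posted, as noted above   */

	iounmap(regs);
	return rev ? 0 : -ENODEV;
}
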
+ 2 - 2
arch/arm/include/asm/memory.h

@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  */
  */
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
-#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
 
 
 extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
 extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
 
 
@@ -286,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
  */
  */
 static inline phys_addr_t __virt_to_idmap(unsigned long x)
 static inline phys_addr_t __virt_to_idmap(unsigned long x)
 {
 {
-	if (arch_virt_to_idmap)
+	if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
 		return arch_virt_to_idmap(x);
 		return arch_virt_to_idmap(x);
 	else
 	else
 		return __virt_to_phys(x);
 		return __virt_to_phys(x);

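The phys_addr_t cast matters on LPAE, where a page frame number can describe a physical address above 4 GiB while unsigned long is still 32 bits wide. A worked example with PAGE_SHIFT == 12:

/*
 * pfn                      = 0x140000   ->  physical address 0x1_4000_0000 (5 GiB)
 * pfn << 12                = 0x40000000     (truncated to 32 bits)
 * (phys_addr_t)pfn << 12   = 0x1_4000_0000  (full value kept in 64 bits)
 *
 * Only the second form hands __va() the address that was meant.
 */
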
+ 0 - 17
arch/arm/include/asm/outercache.h

@@ -129,21 +129,4 @@ static inline void outer_resume(void) { }
 
 
 #endif
 #endif
 
 
-#ifdef CONFIG_OUTER_CACHE_SYNC
-/**
- * outer_sync - perform a sync point for outer cache
- *
- * Ensure that all outer cache operations are complete and any store
- * buffers are drained.
- */
-static inline void outer_sync(void)
-{
-	if (outer_cache.sync)
-		outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
 #endif	/* __ASM_OUTERCACHE_H */
 #endif	/* __ASM_OUTERCACHE_H */

+ 1 - 0
arch/arm/include/asm/pgtable-2level-hwdef.h

@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 /*
 /*
  *   - section
  *   - section

+ 30 - 1
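For reference, the new mask is easy to evaluate by hand:

/*
 * PMD_DOMAIN(x) == (pmdval_t)(x) << 5, so
 * PMD_DOMAIN_MASK == 0x0f << 5 == 0x1e0, i.e. bits [8:5] of a
 * first-level descriptor - the domain field of the entry.
 */
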
arch/arm/include/asm/pgtable-2level.h

@@ -129,7 +129,36 @@
 
 
 /*
 /*
  * These are the memory types, defined to be compatible with
  * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+ * ARMv6+ without TEX remapping, they are a table index.
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type		Pre-ARMv6	ARMv6+ type / cacheable status
+ * UNCACHED		Uncached	Strongly ordered
+ * BUFFERABLE		Bufferable	Normal memory / non-cacheable
+ * WRITETHROUGH		Writethrough	Normal memory / write through
+ * WRITEBACK		Writeback	Normal memory / write back, read alloc
+ * MINICACHE		Minicache	N/A
+ * WRITEALLOC		Writeback	Normal memory / write back, write alloc
+ * DEV_SHARED		Uncached	Device memory (shared)
+ * DEV_NONSHARED	Uncached	Device memory (non-shared)
+ * DEV_WC		Bufferable	Normal memory / non-cacheable
+ * DEV_CACHED		Writeback	Normal memory / write back, read alloc
+ * VECTORS		Variable	Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
  */
  */
 #define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
 #define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
 #define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
 #define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */

+ 1 - 1
arch/arm/include/asm/smp.h

@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void);
 extern int __cpu_disable(void);
 extern int __cpu_disable(void);
 
 
 extern void __cpu_die(unsigned int cpu);
 extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
 
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@ struct smp_operations {
 #ifdef CONFIG_HOTPLUG_CPU
 #ifdef CONFIG_HOTPLUG_CPU
 	int  (*cpu_kill)(unsigned int cpu);
 	int  (*cpu_kill)(unsigned int cpu);
 	void (*cpu_die)(unsigned int cpu);
 	void (*cpu_die)(unsigned int cpu);
+	bool  (*cpu_can_disable)(unsigned int cpu);
 	int  (*cpu_disable)(unsigned int cpu);
 	int  (*cpu_disable)(unsigned int cpu);
 #endif
 #endif
 #endif
 #endif

+ 9 - 0
arch/arm/include/asm/smp_plat.h

@@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void)
 extern int platform_can_secondary_boot(void);
 extern int platform_can_secondary_boot(void);
 extern int platform_can_cpu_hotplug(void);
 extern int platform_can_cpu_hotplug(void);
 
 
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	return 0;
+}
+#endif
+
 #endif
 #endif

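The cpu_can_disable hook added to struct smp_operations above, together with the platform_can_hotplug_cpu() wrapper declared here, lets a platform veto hotplug per CPU. A sketch with invented my_soc_* names, showing only the hotplug-related fields:

static void my_soc_cpu_die(unsigned int cpu);	/* assumed to exist elsewhere */

static bool my_soc_cpu_can_disable(unsigned int cpu)
{
	return cpu != 0;	/* keep CPU0: it drives the clock tick */
}

static struct smp_operations my_soc_smp_ops = {
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= my_soc_cpu_die,	/* required for hotplug at all */
	.cpu_can_disable	= my_soc_cpu_can_disable,
#endif
};

With this in place, platform_can_hotplug_cpu() returns my_soc_cpu_can_disable(cpu) instead of falling back to the default "every CPU except 0" rule.
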
+ 4 - 1
arch/arm/include/asm/switch_to.h

@@ -10,7 +10,9 @@
  * CPU.
  * CPU.
  */
  */
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
-#define finish_arch_switch(prev)	dsb(ish)
+#define __complete_pending_tlbi()	dsb(ish)
+#else
+#define __complete_pending_tlbi()
 #endif
 #endif
 
 
 /*
 /*
@@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 
 
 #define switch_to(prev,next,last)					\
 #define switch_to(prev,next,last)					\
 do {									\
 do {									\
+	__complete_pending_tlbi();					\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
 } while (0)
 
 

+ 0 - 3
arch/arm/include/asm/thread_info.h

@@ -74,9 +74,6 @@ struct thread_info {
 	.flags		= 0,						\
 	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
 	.addr_limit	= KERNEL_DS,					\
-	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
 }
 }
 
 
 #define init_thread_info	(init_thread_union.thread_info)
 #define init_thread_info	(init_thread_union.thread_info)

+ 80 - 5
arch/arm/include/asm/uaccess.h

@@ -49,6 +49,35 @@ struct exception_table_entry
 
 
 extern int fixup_exception(struct pt_regs *regs);
 extern int fixup_exception(struct pt_regs *regs);
 
 
+/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (eg, via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	unsigned int old_domain = get_domain();
+
+	/* Set the current domain access to permit user accesses */
+	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+	return old_domain;
+#else
+	return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/* Restore the user access mask */
+	set_domain(flags);
+#endif
+}
+
 /*
 /*
  * These two are intentionally not defined anywhere - if the kernel
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
  * code generates any references to them, that's a bug.
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
 		register typeof(x) __r2 asm("r2");			\
 		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 		case 1:							\
 			if (sizeof((x)) >= 8)				\
 			if (sizeof((x)) >= 8)				\
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
 			break;						\
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		default: __e = __get_user_bad(); break;			\
 		}							\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		x = (typeof(*(p))) __r2;				\
 		x = (typeof(*(p))) __r2;				\
 		__e;							\
 		__e;							\
 	})
 	})
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 		case 1:							\
 			__put_user_x(__r2, __p, __e, __l, 1);		\
 			__put_user_x(__r2, __p, __e, __l, 1);		\
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
 			break;						\
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		default: __e = __put_user_bad(); break;			\
 		}							\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		__e;							\
 		__e;							\
 	})
 	})
 
 
@@ -300,14 +333,17 @@ static inline void set_fs(mm_segment_t fs)
 do {									\
 do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
 	unsigned long __gu_val;						\
+	unsigned int __ua_flags;					\
 	__chk_user_ptr(ptr);						\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	default: (__gu_val) = __get_user_bad();				\
 	default: (__gu_val) = __get_user_bad();				\
 	}								\
 	}								\
+	uaccess_restore(__ua_flags);					\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 } while (0)
 } while (0)
 
 
@@ -369,9 +405,11 @@ do {									\
 #define __put_user_err(x, ptr, err)					\
 #define __put_user_err(x, ptr, err)					\
 do {									\
 do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	unsigned int __ua_flags;					\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
@@ -379,6 +417,7 @@ do {									\
 	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	default: __put_user_bad();					\
 	default: __put_user_bad();					\
 	}								\
 	}								\
+	uaccess_restore(__ua_flags);					\
 } while (0)
 } while (0)
 
 
 #define __put_user_asm(x, __pu_addr, err, instr)		\
 #define __put_user_asm(x, __pu_addr, err, instr)		\
@@ -451,11 +490,46 @@ do {									\
 
 
 
 
 #ifdef CONFIG_MMU
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_from_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_to_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_clear_user(addr, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
 #else
 #else
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
@@ -488,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
 	return n;
 	return n;
 }
 }
 
 
+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strlen_user(const char __user *str);

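All of the user accessors in this file now follow the same bracketing discipline: open a user-access window, do the access, close the window again. A sketch of the pattern applied to a made-up worker; arm_do_something_user() does not exist and only stands in for the arm_copy_*/arm_clear_user style routines:

extern unsigned long arm_do_something_user(void __user *p, unsigned long n);

static inline unsigned long do_something_user(void __user *p, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();

	n = arm_do_something_user(p, n);	/* user access permitted here */
	uaccess_restore(__ua_flags);		/* window closed again        */
	return n;
}
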
+ 9 - 3
arch/arm/kernel/armksyms.c

@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);
 
 
 extern void fpundefinstr(void);
 extern void fpundefinstr(void);
 
 
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
 	/* platform dependent support */
 	/* platform dependent support */
 EXPORT_SYMBOL(arm_delay_ops);
 EXPORT_SYMBOL(arm_delay_ops);
 
 
@@ -88,12 +91,15 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 EXPORT_SYMBOL(__memzero);
 
 
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
 #ifdef CONFIG_MMU
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(copy_page);
 
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
 
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_2);

+ 25 - 9
arch/arm/kernel/entry-armv.S

@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
 #define SPFIX(code...)
 #define SPFIX(code...)
 #endif
 #endif
 
 
-	.macro	svc_entry, stack_hole=0, trace=1
+	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
  UNWIND(.fnstart		)
  UNWIND(.fnstart		)
  UNWIND(.save {r0 - pc}		)
  UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 #ifdef CONFIG_THUMB2_KERNEL
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(	str	r0, [sp]	)	@ temporarily saved
  SPFIX(	str	r0, [sp]	)	@ temporarily saved
  SPFIX(	mov	r0, sp		)
  SPFIX(	mov	r0, sp		)
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
 	ldmia	r0, {r3 - r5}
 	ldmia	r0, {r3 - r5}
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r6, #-1			@  ""  ""      ""       ""
 	mov	r6, #-1			@  ""  ""      ""       ""
-	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
  SPFIX(	addeq	r2, r2, #4	)
  SPFIX(	addeq	r2, r2, #4	)
 	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
 					@ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
 	@
 	@
 	stmia	r7, {r2 - r6}
 	stmia	r7, {r2 - r6}
 
 
+	uaccess_save r0
+	.if \uaccess
+	uaccess_disable r0
+	.endif
+
 	.if \trace
 	.if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 	bl	trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
 
 
 	.align	5
 	.align	5
 __dabt_svc:
 __dabt_svc:
-	svc_entry
+	svc_entry uaccess=0
 	mov	r2, sp
 	mov	r2, sp
 	dabt_helper
 	dabt_helper
  THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
  THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #endif
 #endif
 
 
-	.macro	usr_entry, trace=1
+	.macro	usr_entry, trace=1, uaccess=1
  UNWIND(.fnstart	)
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
  UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
 	sub	sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
  ARM(	stmdb	r0, {sp, lr}^			)
  ARM(	stmdb	r0, {sp, lr}^			)
  THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
  THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
 
+	.if \uaccess
+	uaccess_disable ip
+	.endif
+
 	@ Enable the alignment trap while in kernel mode
 	@ Enable the alignment trap while in kernel mode
  ATRAP(	teq	r8, r7)
  ATRAP(	teq	r8, r7)
  ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
  ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
@@ -410,7 +419,7 @@ ENDPROC(__fiq_abt)
 	zero_fp
 	zero_fp
 
 
 	.if	\trace
 	.if	\trace
-#ifdef CONFIG_IRQSOFF_TRACER
+#ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 	bl	trace_hardirqs_off
 #endif
 #endif
 	ct_user_exit save = 0
 	ct_user_exit save = 0
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
 
 
 	.align	5
 	.align	5
 __dabt_usr:
 __dabt_usr:
-	usr_entry
+	usr_entry uaccess=0
 	kuser_cmpxchg_check
 	kuser_cmpxchg_check
 	mov	r2, sp
 	mov	r2, sp
 	dabt_helper
 	dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
 
 
 	.align	5
 	.align	5
 __und_usr:
 __und_usr:
-	usr_entry
+	usr_entry uaccess=0
 
 
 	mov	r2, r4
 	mov	r2, r4
 	mov	r3, r5
 	mov	r3, r5
@@ -484,6 +493,8 @@ __und_usr:
 1:	ldrt	r0, [r4]
 1:	ldrt	r0, [r4]
  ARM_BE8(rev	r0, r0)				@ little endian instruction
  ARM_BE8(rev	r0, r0)				@ little endian instruction
 
 
+	uaccess_disable ip
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
 	@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
 2:	ldrht	r5, [r4]
 2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
-	blo	__und_usr_fault_16		@ 16bit undefined instruction
+	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
 3:	ldrht	r0, [r2]
 3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
+	uaccess_disable ip
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
 	orr	r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
 __und_usr_fault_32:
 __und_usr_fault_32:
 	mov	r1, #4
 	mov	r1, #4
 	b	1f
 	b	1f
+__und_usr_fault_16_pan:
+	uaccess_disable ip
 __und_usr_fault_16:
 __und_usr_fault_16:
 	mov	r1, #2
 	mov	r1, #2
 1:	mov	r0, sp
 1:	mov	r0, sp
@@ -770,6 +784,8 @@ ENTRY(__switch_to)
 	ldr	r4, [r2, #TI_TP_VALUE]
 	ldr	r4, [r2, #TI_TP_VALUE]
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
 #ifdef CONFIG_CPU_USE_DOMAINS
+	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
+	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 #endif
 	switch_tls r1, r4, r5, r3, r7
 	switch_tls r1, r4, r5, r3, r7

+ 3 - 0
arch/arm/kernel/entry-common.S

@@ -81,6 +81,7 @@ slow_work_pending:
 	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
 	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
 	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
 	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
 	b	local_restart			@ ... and off we go
 	b	local_restart			@ ... and off we go
+ENDPROC(ret_fast_syscall)
 
 
 /*
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
@@ -196,6 +197,8 @@ ENTRY(vector_swi)
  USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
  USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
 #endif
 #endif
 
 
+	uaccess_disable tbl
+
 	adr	tbl, sys_call_table		@ load syscall table pointer
 	adr	tbl, sys_call_table		@ load syscall table pointer
 
 
 #if defined(CONFIG_OABI_COMPAT)
 #if defined(CONFIG_OABI_COMPAT)

+ 47 - 65
arch/arm/kernel/entry-header.S

@@ -196,7 +196,7 @@
 	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
 	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
 	.endm
 	.endm
 
 
-#ifndef CONFIG_THUMB2_KERNEL
+
 	.macro	svc_exit, rpsr, irq = 0
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
 	.if	\irq != 0
 	@ IRQs already off
 	@ IRQs already off
@@ -215,6 +215,10 @@
 	blne	trace_hardirqs_off
 	blne	trace_hardirqs_off
 #endif
 #endif
 	.endif
 	.endif
+	uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode SVC restore
 	msr	spsr_cxsf, \rpsr
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
 	strex	r1, r2, [r0]			@ clear the exclusive monitor
 	strex	r1, r2, [r0]			@ clear the exclusive monitor
 #endif
 #endif
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#else
+	@ Thumb mode SVC restore
+	ldr	lr, [sp, #S_SP]			@ top of the stack
+	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
+
+	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
+	ldmia	sp, {r0 - r12}
+	mov	sp, lr
+	ldr	lr, [sp], #4
+	rfeia	sp!
+#endif
 	.endm
 	.endm
 
 
 	@
 	@
@@ -241,6 +259,9 @@
 	@ on the stack remains correct).
 	@ on the stack remains correct).
 	@
 	@
 	.macro  svc_exit_via_fiq
 	.macro  svc_exit_via_fiq
+	uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r0, sp
 	mov	r0, sp
 	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
 	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
 				@ clobber state restored below)
 				@ clobber state restored below)
@@ -250,9 +271,27 @@
 	msr	spsr_cxsf, r9
 	msr	spsr_cxsf, r9
 	ldr	r0, [r0, #S_R0]
 	ldr	r0, [r0, #S_R0]
 	ldmia	r8, {pc}^
 	ldmia	r8, {pc}^
+#else
+	@ Thumb mode restore
+	add	r0, sp, #S_R2
+	ldr	lr, [sp, #S_LR]
+	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+			        @ clobber state restored below)
+	ldmia	r0, {r2 - r12}
+	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+	msr	cpsr_c, r1
+	sub	r0, #S_R2
+	add	r8, r0, #S_PC
+	ldmia	r0, {r0 - r1}
+	rfeia	r8
+#endif
 	.endm
 	.endm
 
 
+
 	.macro	restore_user_regs, fast = 0, offset = 0
 	.macro	restore_user_regs, fast = 0, offset = 0
+	uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r2, sp
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
@@ -270,72 +309,16 @@
 						@ after ldm {}^
 						@ after ldm {}^
 	add	sp, sp, #\offset + S_FRAME_SIZE
 	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-#else	/* CONFIG_THUMB2_KERNEL */
-	.macro	svc_exit, rpsr, irq = 0
-	.if	\irq != 0
-	@ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
-	@ The parent context IRQs must have been enabled to get here in
-	@ the first place, so there's no point checking the PSR I bit.
-	bl	trace_hardirqs_on
-#endif
-	.else
-	@ IRQs off again before pulling preserved data off the stack
-	disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	\rpsr, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	\rpsr, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
-	.endif
-	ldr	lr, [sp, #S_SP]			@ top of the stack
-	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
-
-	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
-
-	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
-	ldmia	sp, {r0 - r12}
-	mov	sp, lr
-	ldr	lr, [sp], #4
-	rfeia	sp!
-	.endm
-
-	@
-	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
-	@
-	@ For full details see non-Thumb implementation above.
-	@
-	.macro  svc_exit_via_fiq
-	add	r0, sp, #S_R2
-	ldr	lr, [sp, #S_LR]
-	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
-			        @ clobber state restored below)
-	ldmia	r0, {r2 - r12}
-	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
-	msr	cpsr_c, r1
-	sub	r0, #S_R2
-	add	r8, r0, #S_PC
-	ldmia	r0, {r0 - r1}
-	rfeia	r8
-	.endm
-
-#ifdef CONFIG_CPU_V7M
-	/*
-	 * Note we don't need to do clrex here as clearing the local monitor is
-	 * part of each exception entry and exit sequence.
-	 */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+	@ V7M restore.
+	@ Note that we don't need to do clrex here as clearing the local
+	@ monitor is part of the exception entry and exit sequence.
 	.if	\offset
 	.if	\offset
 	add	sp, #\offset
 	add	sp, #\offset
 	.endif
 	.endif
 	v7m_exception_slow_exit ret_r0 = \fast
 	v7m_exception_slow_exit ret_r0 = \fast
-	.endm
-#else	/* ifdef CONFIG_CPU_V7M */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#else
+	@ Thumb mode restore
 	mov	r2, sp
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
@@ -353,9 +336,8 @@
 	.endif
 	.endif
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-#endif	/* ifdef CONFIG_CPU_V7M / else */
 #endif	/* !CONFIG_THUMB2_KERNEL */
 #endif	/* !CONFIG_THUMB2_KERNEL */
+	.endm
 
 
 /*
 /*
  * Context tracking subsystem.  Used to instrument transitions
  * Context tracking subsystem.  Used to instrument transitions

+ 4 - 4
arch/arm/kernel/head.S

@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
 	sub	lr, r4, r5			@ mmu has been enabled
 	sub	lr, r4, r5			@ mmu has been enabled
 	add	r3, r7, lr
 	add	r3, r7, lr
 	ldrd	r4, [r3, #0]			@ get secondary_data.pgdir
 	ldrd	r4, [r3, #0]			@ get secondary_data.pgdir
+ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
+ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
+ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
 	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
 	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
 	badr	lr, __enable_mmu		@ return address
 	badr	lr, __enable_mmu		@ return address
 	mov	r13, r12			@ __secondary_switched address
 	mov	r13, r12			@ __secondary_switched address
@@ -461,10 +464,7 @@ __enable_mmu:
 #ifdef CONFIG_ARM_LPAE
 #ifdef CONFIG_ARM_LPAE
 	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
 	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
 #else
 #else
-	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mov	r5, #DACR_INIT
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 #endif
 #endif

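The three ARM_BE8() eor instructions added to secondary_startup are the classic XOR swap; in C the same trick (valid here because r4 and r5 are distinct registers) reads:

void xor_swap(unsigned long *a, unsigned long *b)
{
	*a ^= *b;	/* a = a ^ b             */
	*b ^= *a;	/* b = b ^ (a ^ b) == a  */
	*a ^= *b;	/* a = (a ^ b) ^ a == b  */
}
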
+ 1 - 0
arch/arm/kernel/irq.c

@@ -39,6 +39,7 @@
 #include <linux/export.h>
 #include <linux/export.h>
 
 
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
 #include <asm/exception.h>
 #include <asm/exception.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/irq.h>

+ 5 - 2
arch/arm/kernel/perf_event.c

@@ -795,8 +795,10 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 
 
 	/* Don't bother with PPIs; they're already affine */
 	/* Don't bother with PPIs; they're already affine */
 	irq = platform_get_irq(pdev, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (irq >= 0 && irq_is_percpu(irq))
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		cpumask_setall(&pmu->supported_cpus);
 		return 0;
 		return 0;
+	}
 
 
 	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
 	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
 	if (!irqs)
 	if (!irqs)
@@ -818,12 +820,13 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
 			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
 				break;
 				break;
 
 
-		of_node_put(dn);
 		if (cpu >= nr_cpu_ids) {
 		if (cpu >= nr_cpu_ids) {
 			pr_warn("Failed to find logical CPU for %s\n",
 			pr_warn("Failed to find logical CPU for %s\n",
 				dn->name);
 				dn->name);
+			of_node_put(dn);
 			break;
 			break;
 		}
 		}
+		of_node_put(dn);
 
 
 		irqs[i] = cpu;
 		irqs[i] = cpu;
 		cpumask_set_cpu(cpu, &pmu->supported_cpus);
 		cpumask_set_cpu(cpu, &pmu->supported_cpus);

+ 40 - 16
arch/arm/kernel/process.c

@@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void)
 	ledtrig_cpu(CPU_LED_IDLE_END);
 	ledtrig_cpu(CPU_LED_IDLE_END);
 }
 }
 
 
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
-	cpu_die();
-}
-#endif
-
 void __show_regs(struct pt_regs *regs)
 void __show_regs(struct pt_regs *regs)
 {
 {
 	unsigned long flags;
 	unsigned long flags;
@@ -129,12 +122,36 @@ void __show_regs(struct pt_regs *regs)
 	buf[4] = '\0';
 	buf[4] = '\0';
 
 
 #ifndef CONFIG_CPU_V7M
 #ifndef CONFIG_CPU_V7M
-	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
-		buf, interrupts_enabled(regs) ? "n" : "ff",
-		fast_interrupts_enabled(regs) ? "n" : "ff",
-		processor_modes[processor_mode(regs)],
-		isa_modes[isa_mode(regs)],
-		get_fs() == get_ds() ? "kernel" : "user");
+	{
+		unsigned int domain = get_domain();
+		const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		/*
+		 * Get the domain register for the parent context. In user
+		 * mode, we don't save the DACR, so let's use what it should
+		 * be. For other modes, we place it after the pt_regs struct.
+		 */
+		if (user_mode(regs))
+			domain = DACR_UACCESS_ENABLE;
+		else
+			domain = *(unsigned int *)(regs + 1);
+#endif
+
+		if ((domain & domain_mask(DOMAIN_USER)) ==
+		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+			segment = "none";
+		else if (get_fs() == get_ds())
+			segment = "kernel";
+		else
+			segment = "user";
+
+		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+			buf, interrupts_enabled(regs) ? "n" : "ff",
+			fast_interrupts_enabled(regs) ? "n" : "ff",
+			processor_modes[processor_mode(regs)],
+			isa_modes[isa_mode(regs)], segment);
+	}
 #else
 #else
 	printk("xPSR: %08lx\n", regs->ARM_cpsr);
 	printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
 #endif
@@ -146,10 +163,9 @@ void __show_regs(struct pt_regs *regs)
 		buf[0] = '\0';
 		buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 #ifdef CONFIG_CPU_CP15_MMU
 		{
 		{
-			unsigned int transbase, dac;
+			unsigned int transbase, dac = get_domain();
 			asm("mrc p15, 0, %0, c2, c0\n\t"
 			asm("mrc p15, 0, %0, c2, c0\n\t"
-			    "mrc p15, 0, %1, c3, c0\n"
-			    : "=r" (transbase), "=r" (dac));
+			    : "=r" (transbase));
 			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
 			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
 			  	transbase, dac);
 			  	transbase, dac);
 		}
 		}
@@ -210,6 +226,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
 
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
 
+	/*
+	 * Copy the initial value of the domain access control register
+	 * from the current thread: thread->addr_limit will have been
+	 * copied from the current thread via setup_thread_stack() in
+	 * kernel/fork.c
+	 */
+	thread->cpu_domain = get_domain();
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		*childregs = *current_pt_regs();
 		childregs->ARM_r0 = 0;
 		childregs->ARM_r0 = 0;

+ 1 - 1
arch/arm/kernel/reboot.c

@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
 	flush_cache_all();
 	flush_cache_all();
 
 
 	/* Switch to the identity mapping. */
 	/* Switch to the identity mapping. */
-	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
 	phys_reset((unsigned long)addr);
 	phys_reset((unsigned long)addr);
 
 
 	/* Should never get here. */
 	/* Should never get here. */

+ 5 - 1
arch/arm/kernel/setup.c

@@ -37,6 +37,7 @@
 #include <asm/cpu.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
 #include <asm/elf.h>
+#include <asm/fixmap.h>
 #include <asm/procinfo.h>
 #include <asm/procinfo.h>
 #include <asm/psci.h>
 #include <asm/psci.h>
 #include <asm/sections.h>
 #include <asm/sections.h>
@@ -954,6 +955,9 @@ void __init setup_arch(char **cmdline_p)
 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = cmd_line;
 	*cmdline_p = cmd_line;
 
 
+	if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
+		early_fixmap_init();
+
 	parse_early_param();
 	parse_early_param();
 
 
 #ifdef CONFIG_MMU
 #ifdef CONFIG_MMU
@@ -1015,7 +1019,7 @@ static int __init topology_init(void)
 
 
 	for_each_possible_cpu(cpu) {
 	for_each_possible_cpu(cpu) {
 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
-		cpuinfo->cpu.hotpluggable = 1;
+		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
 		register_cpu(&cpuinfo->cpu, cpu);
 		register_cpu(&cpuinfo->cpu, cpu);
 	}
 	}
 
 

+ 17 - 4
arch/arm/kernel/smp.c

@@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu)
 	if (smp_ops.cpu_disable)
 	if (smp_ops.cpu_disable)
 		return smp_ops.cpu_disable(cpu);
 		return smp_ops.cpu_disable(cpu);
 
 
+	return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	/* cpu_die must be specified to support hotplug */
+	if (!smp_ops.cpu_die)
+		return 0;
+
+	if (smp_ops.cpu_can_disable)
+		return smp_ops.cpu_can_disable(cpu);
+
 	/*
 	/*
 	 * By default, allow disabling all CPUs except the first one,
 	 * By default, allow disabling all CPUs except the first one,
 	 * since this is special on a lot of platforms, e.g. because
 	 * since this is special on a lot of platforms, e.g. because
 	 * of clock tick interrupts.
 	 * of clock tick interrupts.
 	 */
 	 */
-	return cpu == 0 ? -EPERM : 0;
+	return cpu != 0;
 }
 }
+
 /*
 /*
  * __cpu_disable runs on the processor to be shutdown.
  * __cpu_disable runs on the processor to be shutdown.
  */
  */
@@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu)
  * of the other hotplug-cpu capable cores, so presumably coming
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  * out of idle fixes this.
  */
  */
-void __ref cpu_die(void)
+void arch_cpu_idle_dead(void)
 {
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int cpu = smp_processor_id();
 
 
@@ -578,7 +591,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
 
 	if ((unsigned)ipinr < NR_IPI) {
 	if ((unsigned)ipinr < NR_IPI) {
-		trace_ipi_entry(ipi_types[ipinr]);
+		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
 		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
 		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
 	}
 	}
 
 
@@ -637,7 +650,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	}
 	}
 
 
 	if ((unsigned)ipinr < NR_IPI)
 	if ((unsigned)ipinr < NR_IPI)
-		trace_ipi_exit(ipi_types[ipinr]);
+		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
 	set_irq_regs(old_regs);
 	set_irq_regs(old_regs);
 }
 }
 
 

+ 3 - 0
arch/arm/kernel/swp_emulate.c

@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 
 
 	while (1) {
 	while (1) {
 		unsigned long temp;
 		unsigned long temp;
+		unsigned int __ua_flags;
 
 
+		__ua_flags = uaccess_save_and_enable();
 		if (type == TYPE_SWPB)
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 			__user_swpb_asm(*data, address, res, temp);
 		else
 		else
 			__user_swp_asm(*data, address, res, temp);
 			__user_swp_asm(*data, address, res, temp);
+		uaccess_restore(__ua_flags);
 
 
 		if (likely(res != -EAGAIN) || signal_pending(current))
 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
 			break;

+ 0 - 1
arch/arm/kernel/traps.c

@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
 	kuser_init(vectors_base);
 	kuser_init(vectors_base);
 
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
 	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
 	 * on V7-M there is no need to copy the vector table to a dedicated

+ 3 - 4
arch/arm/kernel/vdso.c

@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  */
  */
 void update_vsyscall(struct timekeeper *tk)
 void update_vsyscall(struct timekeeper *tk)
 {
 {
-	struct timespec xtime_coarse;
 	struct timespec64 *wtm = &tk->wall_to_monotonic;
 	struct timespec64 *wtm = &tk->wall_to_monotonic;
 
 
 	if (!cntvct_ok) {
 	if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
 
 
 	vdso_write_begin(vdso_data);
 	vdso_write_begin(vdso_data);
 
 
-	xtime_coarse = __current_kernel_time();
 	vdso_data->tk_is_cntvct			= tk_is_cntvct(tk);
 	vdso_data->tk_is_cntvct			= tk_is_cntvct(tk);
-	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
-	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
+	vdso_data->xtime_coarse_sec		= tk->xtime_sec;
+	vdso_data->xtime_coarse_nsec		= (u32)(tk->tkr_mono.xtime_nsec >>
+							tk->tkr_mono.shift);
 	vdso_data->wtm_clock_sec		= wtm->tv_sec;
 	vdso_data->wtm_clock_sec		= wtm->tv_sec;
 	vdso_data->wtm_clock_nsec		= wtm->tv_nsec;
 	vdso_data->wtm_clock_nsec		= wtm->tv_nsec;
 
 

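The coarse time fields are now taken straight from the timekeeper. The nanosecond part is kept pre-shifted in tkr_mono.xtime_nsec, so the conversion the new code performs is simply (illustrative numbers only):

/*
 * tk->tkr_mono.xtime_nsec = 123456789 << tk->tkr_mono.shift
 * xtime_coarse_nsec       = (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift)
 *                         = 123456789
 */
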
+ 3 - 3
arch/arm/lib/clear_user.S

@@ -12,14 +12,14 @@
 
 
 		.text
 		.text
 
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  * Returns  : number of bytes NOT cleared
  */
  */
 ENTRY(__clear_user_std)
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
 		stmfd	sp!, {r1, lr}
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		mov	r2, #0
 		cmp	r1, #4
 		cmp	r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
 USER(		strnebt	r2, [r0])
 USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
 		ldmfd	sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
 ENDPROC(__clear_user_std)
 
 
 		.pushsection .text.fixup,"ax"
 		.pushsection .text.fixup,"ax"

+ 3 - 3
arch/arm/lib/copy_from_user.S

@@ -17,7 +17,7 @@
 /*
 /*
  * Prototype:
  * Prototype:
  *
  *
- *	size_t __copy_from_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  *
  * Purpose:
  * Purpose:
  *
  *
@@ -89,11 +89,11 @@
 
 
 	.text
 	.text
 
 
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
 
 
 #include "copy_template.S"
 #include "copy_template.S"
 
 
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
 
 
 	.pushsection .fixup,"ax"
 	.pushsection .fixup,"ax"
 	.align 0
 	.align 0

+ 3 - 3
arch/arm/lib/copy_to_user.S

@@ -17,7 +17,7 @@
 /*
 /*
  * Prototype:
  * Prototype:
  *
  *
- *	size_t __copy_to_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  *
  * Purpose:
  * Purpose:
  *
  *
@@ -93,11 +93,11 @@
 	.text
 	.text
 
 
 ENTRY(__copy_to_user_std)
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
 
 
 #include "copy_template.S"
 #include "copy_template.S"
 
 
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
 ENDPROC(__copy_to_user_std)
 
 
 	.pushsection .text.fixup,"ax"
 	.pushsection .text.fixup,"ax"

+ 14 - 0
arch/arm/lib/csumpartialcopyuser.S

@@ -17,6 +17,19 @@
 
 
 		.text
 		.text
 
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		.macro	save_regs
+		mrc	p15, 0, ip, c3, c0, 0
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c3, c0, 0
+		ret	lr
+		.endm
+#else
 		.macro	save_regs
 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
 		.endm
@@ -24,6 +37,7 @@
 		.macro	load_regs
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
 		.endm
+#endif
 
 
 		.macro	load1b,	reg1
 		.macro	load1b,	reg1
 		ldrusr	\reg1, r0, 1
 		ldrusr	\reg1, r0, 1

+ 2 - 0
arch/arm/lib/memcpy.S

@@ -61,8 +61,10 @@
 
 
 /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
 /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
 
 
+ENTRY(mmiocpy)
 ENTRY(memcpy)
 ENTRY(memcpy)
 
 
 #include "copy_template.S"
 #include "copy_template.S"
 
 
 ENDPROC(memcpy)
 ENDPROC(memcpy)
+ENDPROC(mmiocpy)

+ 2 - 0
arch/arm/lib/memset.S

@@ -16,6 +16,7 @@
 	.text
 	.text
 	.align	5
 	.align	5
 
 
+ENTRY(mmioset)
 ENTRY(memset)
 ENTRY(memset)
 UNWIND( .fnstart         )
 UNWIND( .fnstart         )
 	ands	r3, r0, #3		@ 1 unaligned?
 	ands	r3, r0, #3		@ 1 unaligned?
@@ -133,3 +134,4 @@ UNWIND( .fnstart            )
 	b	1b
 	b	1b
 UNWIND( .fnend   )
 UNWIND( .fnend   )
 ENDPROC(memset)
 ENDPROC(memset)
+ENDPROC(mmioset)

+ 3 - 3
arch/arm/lib/uaccess_with_memcpy.c

@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	}
 	}
 
 
 	/* the mmap semaphore is taken only if not in an atomic context */
 	/* the mmap semaphore is taken only if not in an atomic context */
-	atomic = in_atomic();
+	atomic = faulthandler_disabled();
 
 
 	if (!atomic)
 	if (!atomic)
 		down_read(&current->mm->mmap_sem);
 		down_read(&current->mm->mmap_sem);
@@ -136,7 +136,7 @@ out:
 }
 }
 
 
 unsigned long
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 {
 	/*
 	/*
 	 * This test is stubbed out of the main function above to keep
 	 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
 	return n;
 	return n;
 }
 }
 
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 {
 	/* See rationale for this in __copy_to_user() above. */
 	/* See rationale for this in __copy_to_user() above. */
 	if (n < 64)
 	if (n < 64)

+ 1 - 0
arch/arm/mach-mmp/pm-pxa910.c

@@ -18,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/irq.h>
 #include <asm/mach-types.h>
 #include <asm/mach-types.h>
+#include <asm/outercache.h>
 #include <mach/hardware.h>
 #include <mach/hardware.h>
 #include <mach/cputype.h>
 #include <mach/cputype.h>
 #include <mach/addr-map.h>
 #include <mach/addr-map.h>

+ 7 - 0
arch/arm/mach-omap2/Kconfig

@@ -29,6 +29,7 @@ config ARCH_OMAP4
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
 	select OMAP_INTERCONNECT
+	select OMAP_INTERCONNECT_BARRIER
 	select PL310_ERRATA_588369 if CACHE_L2X0
 	select PL310_ERRATA_727915 if CACHE_L2X0
 	select PM_OPP if PM
@@ -46,6 +47,7 @@ config SOC_OMAP5
 	select HAVE_ARM_TWD if SMP
 	select HAVE_ARM_ARCH_TIMER
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT_BARRIER
 
 config SOC_AM33XX
 	bool "TI AM33XX"
@@ -70,6 +72,7 @@ config SOC_DRA7XX
 	select HAVE_ARM_ARCH_TIMER
 	select IRQ_CROSSBAR
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT_BARRIER
 
 config ARCH_OMAP2PLUS
 	bool
@@ -91,6 +94,10 @@ config ARCH_OMAP2PLUS
 	help
 	  Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
 
+config OMAP_INTERCONNECT_BARRIER
+	bool
+	select ARM_HEAVY_MB
+
 
 if ARCH_OMAP2PLUS
 

+ 1 - 0
arch/arm/mach-omap2/common.c

@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void)
 void __init omap_reserve(void)
 {
 	omap_secure_ram_reserve_memblock();
+	omap_barrier_reserve_memblock();
 }

+ 9 - 0
arch/arm/mach-omap2/common.h

@@ -189,6 +189,15 @@ static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
 }
 #endif
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+void omap_barrier_reserve_memblock(void);
+void omap_barriers_init(void);
+#else
+static inline void omap_barrier_reserve_memblock(void)
+{
+}
+#endif
+
 /* This gets called from mach-omap2/io.c, do not call this */
 void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
 

+ 0 - 1
arch/arm/mach-omap2/dma.c

@@ -117,7 +117,6 @@ static void omap2_show_dma_caps(void)
 	u8 revision = dma_read(REVISION, 0) & 0xff;
 	printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
 				revision >> 4, revision & 0xf);
-	return;
 }
 
 static unsigned configure_dma_errata(void)

+ 0 - 33
arch/arm/mach-omap2/include/mach/barriers.h

@@ -1,33 +0,0 @@
-/*
- * OMAP memory barrier header.
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- *  Santosh Shilimkar <santosh.shilimkar@ti.com>
- *  Richard Woodruff <r-woodruff2@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __MACH_BARRIERS_H
-#define __MACH_BARRIERS_H
-
-#include <asm/outercache.h>
-
-extern void omap_bus_sync(void);
-
-#define rmb()		dsb()
-#define wmb()		do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
-#define mb()		wmb()
-
-#endif	/* __MACH_BARRIERS_H */

+ 2 - 0
arch/arm/mach-omap2/io.c

@@ -306,6 +306,7 @@ void __init am33xx_map_io(void)
 void __init omap4_map_io(void)
 {
 	iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 
@@ -313,6 +314,7 @@ void __init omap4_map_io(void)
 void __init omap5_map_io(void)
 {
 	iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 /*

+ 121 - 0
arch/arm/mach-omap2/omap4-common.c

@@ -51,6 +51,127 @@ static void __iomem *twd_base;
 
 #define IRQ_LOCALTIMER		29
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+
+/* Used to implement memory barrier on DRAM path */
+#define OMAP4_DRAM_BARRIER_VA			0xfe600000
+
+static void __iomem *dram_sync, *sram_sync;
+static phys_addr_t dram_sync_paddr;
+static u32 dram_sync_size;
+
+/*
+ * The OMAP4 bus structure contains asynchronous bridges which can buffer
+ * data writes from the MPU. These asynchronous bridges can be found on
+ * paths between the MPU to EMIF, and the MPU to L3 interconnects.
+ *
+ * We need to be careful about re-ordering which can happen as a result
+ * of different accesses being performed via different paths, and
+ * therefore different asynchronous bridges.
+ */
+
+/*
+ * OMAP4 interconnect barrier which is called for each mb() and wmb().
+ * This is to ensure that normal paths to DRAM (normal memory, cacheable
+ * accesses) are properly synchronised with writes to DMA coherent memory
+ * (normal memory, uncacheable) and device writes.
+ *
+ * The mb() and wmb() barriers operate only on the MPU->MA->EMIF
+ * path, as we need to ensure that data is visible to other system
+ * masters prior to writes to those system masters being seen.
+ *
+ * Note: the SRAM path is not synchronised via mb() and wmb().
+ */
+static void omap4_mb(void)
+{
+	if (dram_sync)
+		writel_relaxed(0, dram_sync);
+}
+
+/*
+ * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
+ *
+ * If data is stalled inside an asynchronous bridge because of back
+ * pressure, it may be accepted multiple times, creating pointer
+ * misalignment that will corrupt next transfers on that data path until
+ * next reset of the system. No recovery procedure once the issue is hit,
+ * the path remains consistently broken.
+ *
+ * Async bridges can be found on paths between MPU to EMIF and MPU to L3
+ * interconnects.
+ *
+ * This situation can happen only when the idle is initiated by a Master
+ * Request Disconnection (which is triggered by software when executing WFI
+ * on the CPU).
+ *
+ * The work-around for this errata needs all the initiators connected
+ * through an async bridge to ensure that data path is properly drained
+ * before issuing WFI. This condition will be met if one Strongly ordered
+ * access is performed to the target right before executing the WFI.
+ *
+ * In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained.
+ * The I/O barrier ensures that there is no synchronisation loss on
+ * initiators operating on both interconnect ports simultaneously.
+ *
+ * This is a stronger version of the OMAP4 memory barrier above: it
+ * operates on both the MPU->MA->EMIF path and the MPU->OCP path, and
+ * is necessary prior to executing a WFI.
+ */
+void omap_interconnect_sync(void)
+{
+	if (dram_sync && sram_sync) {
+		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
+		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
+		isb();
+	}
+}
+
+static int __init omap4_sram_init(void)
+{
+	struct device_node *np;
+	struct gen_pool *sram_pool;
+
+	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+	if (!np)
+		pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
+			__func__);
+	sram_pool = of_gen_pool_get(np, "sram", 0);
+	if (!sram_pool)
+		pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
+			__func__);
+	else
+		sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
+
+	return 0;
+}
+omap_arch_initcall(omap4_sram_init);
+
+/* Steal one page physical memory for barrier implementation */
+void __init omap_barrier_reserve_memblock(void)
+{
+	dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
+	dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
+}
+
+void __init omap_barriers_init(void)
+{
+	struct map_desc dram_io_desc[1];
+
+	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
+	dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
+	dram_io_desc[0].length = dram_sync_size;
+	dram_io_desc[0].type = MT_MEMORY_RW_SO;
+	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
+	dram_sync = (void __iomem *) dram_io_desc[0].virtual;
+
+	pr_info("OMAP4: Map %pa to %p for dram barrier\n",
+		&dram_sync_paddr, dram_sync);
+
+	soc_mb = omap4_mb;
+}
+
+#endif
+
 void gic_dist_disable(void)
 {
 	if (gic_dist_base_addr)
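For illustration only (not part of this commit): the OMAP4 code above follows a simple hook pattern — the platform assigns its barrier routine to the soc_mb function pointer, and the generic ARM heavy-barrier path calls it after the outer cache sync. A minimal sketch of that pattern, with a hypothetical platform function and scratch mapping:

/* Hypothetical SoC code mirroring the soc_mb hook used above. */
#include <linux/io.h>

extern void (*soc_mb)(void);		/* provided by arch/arm/mm/flush.c */

static void __iomem *my_soc_sync_reg;	/* strongly-ordered scratch mapping (assumed) */

static void my_soc_mb(void)
{
	/* A relaxed write to a strongly-ordered page drains the write buffers. */
	if (my_soc_sync_reg)
		writel_relaxed(0, my_soc_sync_reg);
}

static void __init my_soc_barrier_init(void)
{
	/* From here on, mb()/wmb() built on arm_heavy_mb() also run my_soc_mb(). */
	soc_mb = my_soc_mb;
}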

+ 3 - 5
arch/arm/mach-omap2/sleep44xx.S

@@ -333,14 +333,12 @@ ENDPROC(omap4_cpu_resume)
 
 #endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */
 
-ENTRY(omap_bus_sync)
-	ret	lr
-ENDPROC(omap_bus_sync)
-
 ENTRY(omap_do_wfi)
 	stmfd	sp!, {lr}
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
 	/* Drain interconnect write buffers. */
-	bl omap_bus_sync
+	bl	omap_interconnect_sync
+#endif
 
 	/*
 	 * Execute an ISB instruction to ensure that all of the

+ 1 - 0
arch/arm/mach-prima2/Kconfig

@@ -4,6 +4,7 @@ menuconfig ARCH_SIRF
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_IRQ_CHIP
 	select NO_IOPORT_MAP
+	select REGMAP
 	select PINCTRL
 	select PINCTRL_SIRF
 	help

+ 1 - 0
arch/arm/mach-prima2/pm.c

@@ -16,6 +16,7 @@
 #include <linux/of_platform.h>
 #include <linux/io.h>
 #include <linux/rtc/sirfsoc_rtciobrg.h>
+#include <asm/outercache.h>
 #include <asm/suspend.h>
 #include <asm/hardware/cache-l2x0.h>
 

+ 45 - 3
arch/arm/mach-prima2/rtciobrg.c

@@ -1,5 +1,5 @@
 /*
- * RTC I/O Bridge interfaces for CSR SiRFprimaII
+ * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7
  * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
  *
  * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/regmap.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
@@ -66,6 +67,7 @@ u32 sirfsoc_rtc_iobrg_readl(u32 addr)
 {
 	unsigned long flags, val;
 
+	/* TODO: add hwspinlock to sync with M3 */
 	spin_lock_irqsave(&rtciobrg_lock, flags);
 
 	val = __sirfsoc_rtc_iobrg_readl(addr);
@@ -90,6 +92,7 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
 {
 	unsigned long flags;
 
+	/* TODO: add hwspinlock to sync with M3 */
 	spin_lock_irqsave(&rtciobrg_lock, flags);
 
 	sirfsoc_rtc_iobrg_pre_writel(val, addr);
@@ -102,6 +105,45 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
 }
 EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel);
 
+
+static int regmap_iobg_regwrite(void *context, unsigned int reg,
+				   unsigned int val)
+{
+	sirfsoc_rtc_iobrg_writel(val, reg);
+	return 0;
+}
+
+static int regmap_iobg_regread(void *context, unsigned int reg,
+				  unsigned int *val)
+{
+	*val = (u32)sirfsoc_rtc_iobrg_readl(reg);
+	return 0;
+}
+
+static struct regmap_bus regmap_iobg = {
+	.reg_write = regmap_iobg_regwrite,
+	.reg_read = regmap_iobg_regread,
+};
+
+/**
+ * devm_regmap_init_iobg(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_iobg(struct device *dev,
+				    const struct regmap_config *config)
+{
+	const struct regmap_bus *bus = &regmap_iobg;
+
+	return devm_regmap_init(dev, bus, dev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_iobg);
+
 static const struct of_device_id rtciobrg_ids[] = {
 	{ .compatible = "sirf,prima2-rtciobg" },
 	{}
@@ -132,7 +174,7 @@ static int __init sirfsoc_rtciobrg_init(void)
 }
 postcore_initcall(sirfsoc_rtciobrg_init);
 
-MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, "
-		"Barry Song <baohua.song@csr.com>");
+MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>");
+MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
 MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge");
 MODULE_LICENSE("GPL v2");
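As a usage illustration (not part of this commit), a hypothetical driver probe could obtain a regmap over the RTC I/O bridge with the devm_regmap_init_iobg() helper added above; the register offsets and config values below are made up for the sketch.

/* Hypothetical consumer of devm_regmap_init_iobg(); register layout is assumed. */
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config example_iobg_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int example_probe(struct platform_device *pdev)
{
	struct regmap *map;
	unsigned int val;

	map = devm_regmap_init_iobg(&pdev->dev, &example_iobg_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Reads and writes go through sirfsoc_rtc_iobrg_readl()/_writel(). */
	regmap_read(map, 0x0, &val);
	regmap_write(map, 0x4, val | 0x1);
	return 0;
}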

+ 1 - 1
arch/arm/mach-shmobile/common.h

@@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void);
 extern void shmobile_smp_sleep(void);
 extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
 			      unsigned long arg);
-extern int shmobile_smp_cpu_disable(unsigned int cpu);
+extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
 extern void shmobile_boot_scu(void);
 extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
 extern void shmobile_smp_scu_cpu_die(unsigned int cpu);

+ 2 - 2
arch/arm/mach-shmobile/platsmp.c

@@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-int shmobile_smp_cpu_disable(unsigned int cpu)
+bool shmobile_smp_cpu_can_disable(unsigned int cpu)
 {
-	return 0; /* Hotplug of any CPU is supported */
+	return true; /* Hotplug of any CPU is supported */
 }
 #endif

+ 1 - 1
arch/arm/mach-shmobile/smp-r8a7790.c

@@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = {
 	.smp_prepare_cpus	= r8a7790_smp_prepare_cpus,
 	.smp_boot_secondary	= shmobile_smp_apmu_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_apmu_cpu_die,
 	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
 #endif

+ 1 - 1
arch/arm/mach-shmobile/smp-r8a7791.c

@@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = {
 	.smp_prepare_cpus	= r8a7791_smp_prepare_cpus,
 	.smp_boot_secondary	= r8a7791_smp_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_apmu_cpu_die,
 	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
 #endif

+ 1 - 1
arch/arm/mach-shmobile/smp-sh73a0.c

@@ -68,7 +68,7 @@ struct smp_operations sh73a0_smp_ops __initdata = {
 	.smp_prepare_cpus	= sh73a0_smp_prepare_cpus,
 	.smp_boot_secondary	= sh73a0_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_scu_cpu_die,
 	.cpu_kill		= shmobile_smp_scu_cpu_kill,
 #endif

+ 1 - 1
arch/arm/mach-sunxi/Kconfig

@@ -35,7 +35,7 @@ config MACH_SUN7I
 	select SUN5I_HSTIMER
 
 config MACH_SUN8I
-	bool "Allwinner A23 (sun8i) SoCs support"
+	bool "Allwinner sun8i Family SoCs support"
 	default ARCH_SUNXI
 	select ARM_GIC
 	select MFD_SUN6I_PRCM

+ 4 - 1
arch/arm/mach-sunxi/sunxi.c

@@ -67,10 +67,13 @@ MACHINE_END
 
 static const char * const sun8i_board_dt_compat[] = {
 	"allwinner,sun8i-a23",
+	"allwinner,sun8i-a33",
+	"allwinner,sun8i-h3",
 	NULL,
 };
 
-DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i (A23) Family")
+DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
+	.init_time	= sun6i_timer_init,
 	.dt_compat	= sun8i_board_dt_compat,
 	.init_late	= sunxi_dt_cpufreq_init,
 MACHINE_END

+ 1 - 0
arch/arm/mach-ux500/cache-l2x0.c

@@ -8,6 +8,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 
+#include <asm/outercache.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "db8500-regs.h"

+ 4 - 0
arch/arm/mm/Kconfig

@@ -883,6 +883,7 @@ config OUTER_CACHE
 
 config OUTER_CACHE_SYNC
 	bool
+	select ARM_HEAVY_MB
 	help
 	  The outer cache has an outer_cache_fns.sync function pointer
 	  that can be used to drain the write buffer of the outer cache.
@@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS
 	  This option allows the use of custom mandatory barriers
 	  included via the mach/barriers.h file.
 
+config ARM_HEAVY_MB
+	bool
+
 config ARCH_SUPPORTS_BIG_ENDIAN
 	bool
 	help

+ 1 - 0
arch/arm/mm/abort-ev4.S

@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	ldr	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable userspace access
 	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
 	tst	r3, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.

+ 3 - 1
arch/arm/mm/abort-ev5t.S

@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable user access
 	bic	r1, r1, #1 << 11		@ clear bits 11 of FSR
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ check write
 	orreq	r1, r1, #1 << 11
 	b	do_DataAbort

+ 3 - 1
arch/arm/mm/abort-ev5tj.S

@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
 	bne	do_DataAbort
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
-	do_ldrd_abort tmp=ip, insn=r3
+	uaccess_disable ip			@ disable userspace access
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 	b	do_DataAbort

+ 5 - 3
arch/arm/mm/abort-ev6.S

@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
 	ldr	ip, =0x4107b36
 	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
 	teq	ip, r3, lsr #4			@ r0 ARM1136?
-	bne	do_DataAbort
+	bne	1f
 	tst	r5, #PSR_J_BIT			@ Java?
 	tsteq	r5, #PSR_T_BIT			@ Thumb?
-	bne	do_DataAbort
+	bne	1f
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
 ARM_BE8(rev	r3, r3)
 
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	1f				@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 #endif
+1:	uaccess_disable ip			@ disable userspace access
 	b	do_DataAbort

+ 1 - 0
arch/arm/mm/abort-ev7.S

@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+	uaccess_disable ip			@ disable userspace access
 
 	/*
 	 * V6 code adjusts the returned DFSR.

+ 2 - 0
arch/arm/mm/abort-lv4t.S

@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
 #endif
 	bne	.data_thumb_abort
 	ldr	r8, [r4]			@ read arm instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
 
 .data_thumb_abort:
 	ldrh	r8, [r4]			@ read instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 11			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 8			@ yes
 	and	r7, r8, #15 << 12

+ 6 - 8
arch/arm/mm/abort-macro.S

@@ -13,6 +13,7 @@
 	tst	\psr, #PSR_T_BIT
 	beq	not_thumb
 	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
+	uaccess_disable ip			@ disable userspace access
 	and	\tmp, \tmp, # 0xfe00		@ Mask opcode field
 	cmp	\tmp, # 0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
  *   [7:4] == 1101
  *    [20] == 0
  */
-	.macro	do_ldrd_abort, tmp, insn
-	tst	\insn, #0x0e100000		@ [27:25,20] == 0
-	bne	not_ldrd
-	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
-	cmp	\tmp, #0x000000d0
-	beq	do_DataAbort
-not_ldrd:
+	.macro	teq_ldrd, tmp, insn
+	mov	\tmp, #0x0e100000
+	orr	\tmp, #0x000000f0
+	and	\tmp, \insn, \tmp
+	teq	\tmp, #0x000000d0
 	.endm
-
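For reference (not part of this commit): the teq_ldrd test above reduces to a single mask-and-compare on the instruction word. The equivalent check in C would look like this sketch.

#include <stdbool.h>
#include <stdint.h>

/* LDRD: bits [27:25] == 0, bit [20] == 0 and bits [7:4] == 0b1101. */
static bool insn_is_ldrd(uint32_t insn)
{
	return (insn & 0x0e1000f0) == 0x000000d0;
}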

+ 1 - 5
arch/arm/mm/cache-feroceon-l2.c

@@ -368,7 +368,6 @@ int __init feroceon_of_init(void)
 	struct device_node *node;
 	void __iomem *base;
 	bool l2_wt_override = false;
-	struct resource res;
 
 #if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
 	l2_wt_override = true;
@@ -376,10 +375,7 @@ int __init feroceon_of_init(void)
 
 	node = of_find_matching_node(NULL, feroceon_ids);
 	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
-		if (of_address_to_resource(node, 0, &res))
-			return -ENODEV;
-
-		base = ioremap(res.start, resource_size(&res));
+		base = of_iomap(node, 0);
 		if (!base)
 			return -ENOMEM;
 

+ 5 - 0
arch/arm/mm/cache-l2x0.c

@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np,
 		}
 	}
 
+	if (of_property_read_bool(np, "arm,shared-override")) {
+		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+	}
+
 	prefetch = l2x0_saved_regs.prefetch_ctrl;
 
 	ret = of_property_read_u32(np, "arm,double-linefill", &val);

+ 14 - 10
arch/arm/mm/dma-mapping.c

@@ -39,6 +39,7 @@
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
 
+#include "dma.h"
 #include "mm.h"
 
 /*
@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	size = PAGE_ALIGN(size);
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-	if (is_coherent || nommu())
+	if (nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+					       caller, want_vaddr);
+	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+					    caller, want_vaddr);
 
 	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot, true,
+	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
 			   attrs, __builtin_return_address(0));
 }
 
@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 
-	if (is_coherent || nommu()) {
+	if (nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (__free_from_pool(cpu_addr, size)) {
+	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr)
+		if (want_vaddr && !is_coherent)
 			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
@@ -1971,7 +1975,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
 {
 	int next_bitmap;
 
-	if (mapping->nr_bitmaps > mapping->extensions)
+	if (mapping->nr_bitmaps >= mapping->extensions)
 		return -EINVAL;
 
 	next_bitmap = mapping->nr_bitmaps;

+ 32 - 0
arch/arm/mm/dma.h

@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area 		__glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area 		cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
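A small illustration (not part of this commit) of the __glue() token-pasting used in the new header above: when MULTI_CACHE is not set, dmac_map_area resolves at compile time to the CPU-specific implementation. The CPU name below is an assumption for the sketch.

/* Sketch of the token-pasting indirection; _CACHE would be e.g. v7 in a single-cache build. */
#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

#define _CACHE			v7			/* assumed single-cache configuration */
#define dmac_map_area		__glue(_CACHE, _dma_map_area)

/* After preprocessing, calls to dmac_map_area() become calls to v7_dma_map_area(). */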

+ 15 - 0
arch/arm/mm/flush.c

@@ -21,6 +21,21 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_HEAVY_MB
+void (*soc_mb)(void);
+
+void arm_heavy_mb(void)
+{
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	if (outer_cache.sync)
+		outer_cache.sync();
+#endif
+	if (soc_mb)
+		soc_mb();
+}
+EXPORT_SYMBOL(arm_heavy_mb);
+#endif
+
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)

+ 3 - 3
arch/arm/mm/highmem.c

@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
 
 	type = kmap_atomic_idx_push();
 
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
+		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));

+ 23 - 10
arch/arm/mm/ioremap.c

@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
 	const struct mem_type *type;
@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 		  unsigned int mtype)
 {
 	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
-			__builtin_return_address(0));
+					__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 				      unsigned int, void *) =
 	__arm_ioremap_caller;
 
-void __iomem *
-__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
-	return arch_ioremap_caller(phys_addr, size, mtype,
-		__builtin_return_address(0));
+	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+				   __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap_wc);
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)
 
 void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
 
-void __arm_iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *cookie)
 {
-	arch_iounmap(io_addr);
+	arch_iounmap(cookie);
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
 
 #ifdef CONFIG_PCI
 static int pci_ioremap_mem_type = MT_DEVICE;
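Purely as a usage illustration (not part of this commit): with the generically named ioremap()/iounmap() wrappers now defined and exported by this file, a driver maps a register window as usual. The physical base address and register write below are made up for the sketch.

#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_REGS_PHYS	0x48000000	/* hypothetical device base */
#define EXAMPLE_REGS_SIZE	0x1000

static void __iomem *example_regs;

static int example_map_regs(void)
{
	/* MT_DEVICE mapping via the ioremap() wrapper added above. */
	example_regs = ioremap(EXAMPLE_REGS_PHYS, EXAMPLE_REGS_SIZE);
	if (!example_regs)
		return -ENOMEM;

	writel(0x1, example_regs);	/* device-specific enable bit (assumed) */
	return 0;
}

static void example_unmap_regs(void)
{
	iounmap(example_regs);
}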

+ 90 - 9
arch/arm/mm/mmu.c

@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+	pmd_t *pmd;
+
+	/*
+	 * The early fixmap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+		     != FIXADDR_TOP >> PMD_SHIFT);
+
+	pmd = fixmap_pmd(FIXADDR_TOP);
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+	pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
 /*
  * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
  * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
 	/* Make sure fixmap region does not exceed available allocation. */
 	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1072,6 +1113,7 @@ void __init sanity_check_meminfo(void)
 	int highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 	struct memblock_region *reg;
+	bool should_use_highmem = false;
 
 	for_each_memblock(memory, reg) {
 		phys_addr_t block_start = reg->base;
@@ -1090,6 +1132,7 @@ void __init sanity_check_meminfo(void)
 				pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
 					  &block_start, &block_end);
 				memblock_remove(reg->base, reg->size);
+				should_use_highmem = true;
 				continue;
 			}
 
@@ -1100,6 +1143,7 @@ void __init sanity_check_meminfo(void)
 					  &block_start, &block_end, &vmalloc_limit);
 				memblock_remove(vmalloc_limit, overlap_size);
 				block_end = vmalloc_limit;
+				should_use_highmem = true;
 			}
 		}
 
@@ -1134,6 +1178,9 @@ void __init sanity_check_meminfo(void)
 		}
 	}
 
+	if (should_use_highmem)
+		pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
 	/*
@@ -1213,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void)
 
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except early fixmap, we might remove debug
+ * device mappings.  This means earlycon can be used to debug this function.
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
  */
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
@@ -1231,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 
 	early_trap_init(vectors);
 
-	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+	/*
+	 * Clear page table except top pmd used by early fixmaps
+	 */
+	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
@@ -1483,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
 
 #endif
 
+static void __init early_fixmap_shutdown(void)
+{
+	int i;
+	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+	pte_offset_fixmap = pte_offset_late_fixmap;
+	pmd_clear(fixmap_pmd(va));
+	local_flush_tlb_kernel_page(va);
+
+	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+		pte_t *pte;
+		struct map_desc map;
+
+		map.virtual = fix_to_virt(i);
+		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+		/* Only i/o device mappings are supported ATM */
+		if (pte_none(*pte) ||
+		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+			continue;
+
+		map.pfn = pte_pfn(*pte);
+		map.type = MT_DEVICE;
+		map.length = PAGE_SIZE;
+
+		create_mapping(&map);
+	}
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -1494,7 +1573,9 @@ void __init paging_init(const struct machine_desc *mdesc)
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
+	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
+	early_fixmap_shutdown();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();

+ 26 - 13
arch/arm/mm/nommu.c

@@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
-			   size_t size, unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
+				   unsigned int mtype, void *caller)
 {
-	return __arm_ioremap_pfn(pfn, offset, size, mtype);
+	return (void __iomem *)phys_addr;
 }
 
-void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
-			    unsigned int mtype)
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 {
-	return (void __iomem *)phys_addr;
+	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
+				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap);
 
-void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+				    __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
 
-void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
-				   unsigned int mtype, void *caller)
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+{
+	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+				    __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iounmap(volatile void __iomem *addr)
 {
-	return __arm_ioremap(phys_addr, size, mtype);
 }
+EXPORT_SYMBOL(__iounmap);
 
 void (*arch_iounmap)(volatile void __iomem *);
 
-void __arm_iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);

+ 10 - 0
arch/arm/mm/pgd.c

@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 		if (!new_pte)
 			goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Modify the PTE pointer to have the correct domain.  This
+		 * needs to be the vectors domain to avoid the low vectors
+		 * being unmapped.
+		 */
+		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
 		init_pud = pud_offset(init_pgd, 0);
 		init_pmd = pmd_offset(init_pud, 0);
 		init_pte = pte_offset_map(init_pmd, 0);

+ 9 - 5
arch/arm/mm/proc-v7.S

@@ -274,7 +274,10 @@ __v7_ca15mp_setup:
 __v7_b15mp_setup:
 __v7_ca17mp_setup:
 	mov	r10, #0
-1:
+1:	adr	r12, __v7_setup_stack		@ the local stack
+	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
+	bl      v7_invalidate_l1
+	ldmia	r12, {r0-r5, lr}
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
 	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
@@ -283,7 +286,7 @@ __v7_ca17mp_setup:
 	orreq	r0, r0, r10			@ Enable CPU-specific SMP bits
 	mcreq	p15, 0, r0, c1, c0, 1
 #endif
-	b	__v7_setup
+	b	__v7_setup_cont
 
 /*
  * Errata:
@@ -413,10 +416,11 @@ __v7_pj4b_setup:
 
 __v7_setup:
 	adr	r12, __v7_setup_stack		@ the local stack
-	stmia	r12, {r0-r5, r7, r9, r11, lr}
+	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
 	bl      v7_invalidate_l1
-	ldmia	r12, {r0-r5, r7, r9, r11, lr}
+	ldmia	r12, {r0-r5, lr}
 
+__v7_setup_cont:
 	and	r0, r9, #0xff000000		@ ARM?
 	teq	r0, #0x41000000
 	bne	__errata_finish
@@ -480,7 +484,7 @@ ENDPROC(__v7_setup)
 
 	.align	2
 __v7_setup_stack:
-	.space	4 * 11				@ 11 registers
+	.space	4 * 7				@ 7 registers
 
 	__INITDATA
 

+ 1 - 1
arch/arm/vdso/Makefile

@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 VDSO_LDFLAGS += -nostdlib -shared
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
 
 obj-$(CONFIG_VDSO) += vdso.o
 extra-$(CONFIG_VDSO) += vdso.lds

+ 33 - 23
arch/arm/vdso/vdsomunge.c

@@ -45,13 +45,11 @@
  * it does.
  */
 
-#define _GNU_SOURCE
-
 #include <byteswap.h>
 #include <elf.h>
 #include <errno.h>
-#include <error.h>
 #include <fcntl.h>
+#include <stdarg.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -82,11 +80,25 @@
 #define EF_ARM_ABI_FLOAT_HARD 0x400
 #endif
 
+static int failed;
+static const char *argv0;
 static const char *outfile;
 
+static void fail(const char *fmt, ...)
+{
+	va_list ap;
+
+	failed = 1;
+	fprintf(stderr, "%s: ", argv0);
+	va_start(ap, fmt);
+	vfprintf(stderr, fmt, ap);
+	va_end(ap);
+	exit(EXIT_FAILURE);
+}
+
 static void cleanup(void)
 {
-	if (error_message_count > 0 && outfile != NULL)
+	if (failed && outfile != NULL)
 		unlink(outfile);
 }
 
@@ -119,68 +131,66 @@ int main(int argc, char **argv)
 	int infd;
 
 	atexit(cleanup);
+	argv0 = argv[0];
 
 	if (argc != 3)
-		error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
+		fail("Usage: %s [infile] [outfile]\n", argv[0]);
 
 	infile = argv[1];
 	outfile = argv[2];
 
 	infd = open(infile, O_RDONLY);
 	if (infd < 0)
-		error(EXIT_FAILURE, errno, "Cannot open %s", infile);
+		fail("Cannot open %s: %s\n", infile, strerror(errno));
 
 	if (fstat(infd, &stat) != 0)
-		error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
+		fail("Failed stat for %s: %s\n", infile, strerror(errno));
 
 	inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
 	if (inbuf == MAP_FAILED)
-		error(EXIT_FAILURE, errno, "Failed to map %s", infile);
+		fail("Failed to map %s: %s\n", infile, strerror(errno));
 
 	close(infd);
 
 	inhdr = inbuf;
 
 	if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
-		error(EXIT_FAILURE, 0, "Not an ELF file");
+		fail("Not an ELF file\n");
 
 	if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
-		error(EXIT_FAILURE, 0, "Unsupported ELF class");
+		fail("Unsupported ELF class\n");
 
 	swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
 
 	if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
-		error(EXIT_FAILURE, 0, "Not a shared object");
+		fail("Not a shared object\n");
 
-	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
-		error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
-		      inhdr->e_machine);
-	}
+	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
+		fail("Unsupported architecture %#x\n", inhdr->e_machine);
 
 	e_flags = read_elf_word(inhdr->e_flags, swap);
 
 	if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
-		error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
-		      EF_ARM_EABI_VERSION(e_flags));
+		fail("Unsupported EABI version %#x\n",
+		     EF_ARM_EABI_VERSION(e_flags));
 	}
 
 	if (e_flags & EF_ARM_ABI_FLOAT_HARD)
-		error(EXIT_FAILURE, 0,
-		      "Unexpected hard-float flag set in e_flags");
+		fail("Unexpected hard-float flag set in e_flags\n");
 
 	clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
 
 	outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
 	if (outfd < 0)
-		error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
+		fail("Cannot open %s: %s\n", outfile, strerror(errno));
 
 	if (ftruncate(outfd, stat.st_size) != 0)
-		error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
+		fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
 
 	outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 		      outfd, 0);
 	if (outbuf == MAP_FAILED)
-		error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
+		fail("Failed to map %s: %s\n", outfile, strerror(errno));
 
 	close(outfd);
 
@@ -195,7 +205,7 @@ int main(int argc, char **argv)
 	}
 
 	if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
-		error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
+		fail("Failed to sync %s: %s\n", outfile, strerror(errno));
 
 	return EXIT_SUCCESS;
 }

Some files were not shown because too many files changed in this diff