Merge branch 'linux-4.19.y' of http://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable into ti-linux-4.19.y

* 'linux-4.19.y' of http://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable: (880 commits)
  Linux 4.19.24
  mm: proc: smaps_rollup: fix pss_locked calculation
  drm/i915: Prevent a race during I915_GEM_MMAP ioctl with WC set
  drm/i915: Block fbdev HPD processing during suspend
  drm/vkms: Fix license inconsistent
  drm: Use array_size() when creating lease
  dm thin: fix bug where bio that overwrites thin block ignores FUA
  dm crypt: don't overallocate the integrity tag space
  x86/a.out: Clear the dump structure initially
  md/raid1: don't clear bitmap bits on interrupted recovery.
  signal: Restore the stop PTRACE_EVENT_EXIT
  scsi: sd: fix entropy gathering for most rotational disks
  x86/platform/UV: Use efi_runtime_lock to serialise BIOS calls
  tracing/uprobes: Fix output for multiple string arguments
  s390/zcrypt: fix specification exception on z196 during ap probe
  alpha: Fix Eiger NR_IRQS to 128
  alpha: fix page fault handling for r16-r18 targets
  Revert "mm: slowly shrink slabs with a relatively small number of objects"
  Revert "mm: don't reclaim inodes with many attached pages"
  Revert "nfsd4: return default lease period"
  ...

Signed-off-by: LCPD Auto Merger <lcpd_integration@list.ti.com>
LCPD Auto Merger, 6 years ago
Parent
Current commit
35936a1576
100 changed files with 729 additions and 337 deletions
1. Documentation/devicetree/bindings/eeprom/at24.txt (+1 -0)
2. Documentation/filesystems/proc.txt (+3 -1)
3. Makefile (+1 -1)
4. arch/alpha/include/asm/irq.h (+3 -3)
5. arch/alpha/mm/fault.c (+1 -1)
6. arch/arc/include/asm/perf_event.h (+2 -1)
7. arch/arc/lib/memset-archs.S (+32 -8)
8. arch/arc/mm/init.c (+2 -1)
9. arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts (+1 -1)
10. arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts (+1 -1)
11. arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts (+1 -2)
12. arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts (+1 -1)
13. arch/arm/boot/dts/da850-evm.dts (+26 -5)
14. arch/arm/boot/dts/da850.dtsi (+1 -1)
15. arch/arm/boot/dts/gemini-dlink-dir-685.dts (+6 -10)
16. arch/arm/boot/dts/imx51-zii-rdu1.dts (+13 -4)
17. arch/arm/boot/dts/kirkwood-dnskw.dtsi (+2 -2)
18. arch/arm/boot/dts/mmp2.dtsi (+6 -3)
19. arch/arm/boot/dts/omap4-sdp.dts (+1 -0)
20. arch/arm/boot/dts/omap5-board-common.dtsi (+6 -3)
21. arch/arm/boot/dts/omap5-cm-t54.dts (+11 -1)
22. arch/arm/include/asm/assembler.h (+11 -0)
23. arch/arm/include/asm/cputype.h (+1 -0)
24. arch/arm/include/asm/proc-fns.h (+49 -12)
25. arch/arm/include/asm/thread_info.h (+2 -2)
26. arch/arm/include/asm/uaccess.h (+43 -6)
27. arch/arm/kernel/bugs.c (+2 -2)
28. arch/arm/kernel/head-common.S (+3 -3)
29. arch/arm/kernel/setup.c (+25 -15)
30. arch/arm/kernel/signal.c (+44 -36)
31. arch/arm/kernel/smp.c (+46 -0)
32. arch/arm/kernel/sys_oabi-compat.c (+6 -2)
33. arch/arm/lib/copy_from_user.S (+1 -5)
34. arch/arm/lib/copy_to_user.S (+5 -1)
35. arch/arm/lib/uaccess_with_memcpy.c (+2 -1)
36. arch/arm/mach-cns3xxx/pcie.c (+1 -1)
37. arch/arm/mach-integrator/impd1.c (+5 -1)
38. arch/arm/mach-iop32x/n2100.c (+1 -2)
39. arch/arm/mach-omap2/omap-wakeupgen.c (+35 -1)
40. arch/arm/mach-omap2/omap_hwmod.c (+3 -3)
41. arch/arm/mach-pxa/cm-x300.c (+1 -1)
42. arch/arm/mach-pxa/littleton.c (+1 -1)
43. arch/arm/mach-pxa/zeus.c (+1 -1)
44. arch/arm/mach-tango/pm.c (+2 -4)
45. arch/arm/mach-tango/pm.h (+7 -0)
46. arch/arm/mach-tango/setup.c (+2 -0)
47. arch/arm/mm/proc-macros.S (+10 -0)
48. arch/arm/mm/proc-v7-bugs.c (+2 -15)
49. arch/arm/vfp/vfpmodule.c (+8 -12)
50. arch/arm64/boot/dts/marvell/armada-ap806.dtsi (+17 -0)
51. arch/arm64/include/asm/assembler.h (+18 -12)
52. arch/arm64/include/asm/io.h (+24 -8)
53. arch/arm64/include/asm/kvm_arm.h (+3 -0)
54. arch/arm64/include/asm/memory.h (+6 -1)
55. arch/arm64/include/uapi/asm/ptrace.h (+1 -1)
56. arch/arm64/kernel/entry-ftrace.S (+0 -1)
57. arch/arm64/kernel/head.S (+2 -3)
58. arch/arm64/kernel/hibernate.c (+3 -1)
59. arch/arm64/kernel/hyp-stub.S (+2 -0)
60. arch/arm64/kernel/kaslr.c (+7 -2)
61. arch/arm64/kernel/perf_event.c (+1 -0)
62. arch/arm64/kernel/sys_compat.c (+4 -5)
63. arch/arm64/kernel/syscall.c (+4 -5)
64. arch/arm64/kvm/hyp/switch.c (+1 -1)
65. arch/arm64/mm/cache.S (+3 -0)
66. arch/arm64/mm/flush.c (+5 -1)
67. arch/mips/Kconfig (+4 -0)
68. arch/mips/bcm47xx/setup.c (+31 -0)
69. arch/mips/boot/dts/img/boston.dts (+6 -0)
70. arch/mips/include/asm/mach-jz4740/jz4740_mmc.h (+0 -2)
71. arch/mips/include/uapi/asm/inst.h (+1 -1)
72. arch/mips/jz4740/board-qi_lb60.c (+9 -3)
73. arch/mips/kernel/mips-cm.c (+1 -1)
74. arch/mips/lantiq/irq.c (+5 -63)
75. arch/mips/loongson64/common/reset.c (+6 -1)
76. arch/mips/pci/msi-octeon.c (+3 -1)
77. arch/mips/pci/pci-octeon.c (+5 -5)
78. arch/mips/ralink/Kconfig (+1 -0)
79. arch/mips/sibyte/common/Makefile (+1 -0)
80. arch/mips/sibyte/common/dma.c (+14 -0)
81. arch/mips/vdso/Makefile (+3 -2)
82. arch/nds32/mm/Makefile (+5 -1)
83. arch/powerpc/include/asm/book3s/64/pgtable.h (+7 -15)
84. arch/powerpc/include/asm/fadump.h (+1 -1)
85. arch/powerpc/include/asm/uaccess.h (+1 -1)
86. arch/powerpc/kernel/fadump.c (+8 -2)
87. arch/powerpc/kernel/vmlinux.lds.S (+4 -0)
88. arch/powerpc/kvm/powerpc.c (+4 -1)
89. arch/powerpc/mm/fault.c (+3 -1)
90. arch/powerpc/mm/pgtable-book3s64.c (+22 -0)
91. arch/powerpc/perf/isa207-common.c (+6 -1)
92. arch/powerpc/platforms/powernv/pci-ioda-tce.c (+1 -1)
93. arch/powerpc/platforms/pseries/dlpar.c (+2 -0)
94. arch/powerpc/platforms/pseries/hotplug-memory.c (+5 -2)
95. arch/powerpc/xmon/xmon.c (+14 -4)
96. arch/riscv/include/asm/pgtable-bits.h (+6 -0)
97. arch/riscv/include/asm/pgtable.h (+4 -4)
98. arch/riscv/kernel/ptrace.c (+1 -1)
99. arch/s390/include/asm/mmu_context.h (+2 -3)
100. arch/s390/include/uapi/asm/zcrypt.h (+2 -2)

+ 1 - 0
Documentation/devicetree/bindings/eeprom/at24.txt

@@ -27,6 +27,7 @@ Required properties:
                 "atmel,24c256",
                 "atmel,24c512",
                 "atmel,24c1024",
+                "atmel,24c2048",
 
                 If <manufacturer> is not "atmel", then a fallback must be used
                 with the same <model> and "atmel" as manufacturer.

+ 3 - 1
Documentation/filesystems/proc.txt

@@ -496,7 +496,9 @@ manner. The codes are the following:
 
 Note that there is no guarantee that every flag and associated mnemonic will
 be present in all further kernel releases. Things get changed, the flags may
-be vanished or the reverse -- new added.
+be vanished or the reverse -- new added. Interpretation of their meaning
+might change in future as well. So each consumer of these flags has to
+follow each specific kernel version for the exact semantic.
 
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 15
+SUBLEVEL = 24
 EXTRAVERSION =
 NAME = "People's Front"
 

+ 3 - 3
arch/alpha/include/asm/irq.h

@@ -56,15 +56,15 @@
 
 #elif defined(CONFIG_ALPHA_DP264) || \
       defined(CONFIG_ALPHA_LYNX)  || \
-      defined(CONFIG_ALPHA_SHARK) || \
-      defined(CONFIG_ALPHA_EIGER)
+      defined(CONFIG_ALPHA_SHARK)
 # define NR_IRQS	64
 
 #elif defined(CONFIG_ALPHA_TITAN)
 #define NR_IRQS		80
 
 #elif defined(CONFIG_ALPHA_RAWHIDE) || \
-	defined(CONFIG_ALPHA_TAKARA)
+      defined(CONFIG_ALPHA_TAKARA) || \
+      defined(CONFIG_ALPHA_EIGER)
 # define NR_IRQS	128
 
 #elif defined(CONFIG_ALPHA_WILDFIRE)

+ 1 - 1
arch/alpha/mm/fault.c

@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
 /* Macro for exception fixup code to access integer registers.  */
 #define dpf_reg(r)							\
 	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
-				 (r) <= 18 ? (r)+8 : (r)-10])
+				 (r) <= 18 ? (r)+10 : (r)-10])
 
 asmlinkage void
 do_page_fault(unsigned long address, unsigned long mmcsr,

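For context on the dpf_reg() fix above: in this kernel's alpha struct pt_regs, r0-r8 sit at word offsets 0-8, r19-r28 at 9-18, and r16-r18 come last at offsets 26-28 (after hae, the trap arguments, ps, pc and gp), while r9-r15 are spilled by the fault entry code just below pt_regs. The old "(r)+8" therefore pointed r16/r17 at pc and gp. A small stand-alone sketch of the corrected mapping (the helper name dpf_reg_offset is ours, and the layout described is an assumption taken from the upstream headers):

#include <assert.h>

/* Word offset used by the fixed dpf_reg(): negative values index the
 * registers the fault entry code saved just below pt_regs. */
static int dpf_reg_offset(int r)
{
	if (r <= 8)
		return r;		/* r0-r8   -> 0..8            */
	if (r <= 15)
		return r - 16;		/* r9-r15  -> -7..-1 (below)  */
	if (r <= 18)
		return r + 10;		/* r16-r18 -> 26..28 (was +8) */
	return r - 10;			/* r19-r28 -> 9..18           */
}

int main(void)
{
	assert(dpf_reg_offset(16) == 26);	/* old formula gave 24, i.e. pc */
	assert(dpf_reg_offset(19) == 9);
	return 0;
}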
+ 2 - 1
arch/arc/include/asm/perf_event.h

@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
 
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
 	[PERF_COUNT_ARC_BPOK]         = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",

+ 32 - 8
arch/arc/lib/memset-archs.S

@@ -7,11 +7,39 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/cache.h>
 
-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use prefetchw and prealloc
+ * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
+ * If you want to implement optimized memset for other possible L1 data cache
+ * line lengths (32B and 128B) you should rewrite code carefully checking
+ * we don't call any prefetchw/prealloc instruction for L1 cache lines which
+ * don't belongs to memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR	reg, off
+	prealloc	[\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR	reg, off
+	prefetchw	[\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR
+.endm
+
+.macro PREFETCHW_INSTR
+.endm
+
+#endif
 
 ENTRY_CFI(memset)
-	prefetchw [r0]		; Prefetch the write location
+	PREFETCHW_INSTR	r0, 0	; Prefetch the first write location
 	mov.f	0, r2
 ;;; if size is zero
 	jz.d	[blink]
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
 
 	lpnz	@.Lset64bytes
 	;; LOOP START
-#ifdef PREALLOC_NOT_AVAIL
-	prefetchw [r3, 64]	;Prefetch the next write location
-#else
-	prealloc  [r3, 64]
-#endif
+	PREALLOC_INSTR	r3, 64	; alloc next line w/o fetching
+
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
 	lsr.f	lp_count, r2, 5 ;Last remaining  max 124 bytes
 	lpnz	.Lset32bytes
 	;; LOOP START
-	prefetchw   [r3, 32]	;Prefetch the next write location
#ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]

+ 2 - 1
arch/arc/mm/init.c

@@ -138,7 +138,8 @@ void __init setup_arch_memory(void)
 	 */
 
 	memblock_add_node(low_mem_start, low_mem_sz, 0);
-	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+	memblock_reserve(CONFIG_LINUX_LINK_BASE,
+			 __pa(_end) - CONFIG_LINUX_LINK_BASE);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start)

+ 1 - 1
arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts

@@ -13,7 +13,7 @@
 		bootargs = "console=ttyS4,115200 earlyprintk";
 	};
 
-	memory {
+	memory@80000000 {
 		reg = <0x80000000 0x40000000>;
 	};
 

+ 1 - 1
arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts

@@ -13,7 +13,7 @@
 		bootargs = "earlyprintk";
 	};
 
-	memory {
+	memory@80000000 {
 		reg = <0x80000000 0x20000000>;
 	};
 

+ 1 - 2
arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts

@@ -14,7 +14,7 @@
 		bootargs = "console=ttyS4,115200 earlyprintk";
 	};
 
-	memory {
+	memory@80000000 {
 		reg = <0x80000000 0x40000000>;
 	};
 
@@ -322,4 +322,3 @@
 &adc {
 	status = "okay";
 };
-

+ 1 - 1
arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts

@@ -17,7 +17,7 @@
 		bootargs = "console=ttyS4,115200 earlyprintk";
 	};
 
-	memory {
+	memory@80000000 {
 		reg = <0x80000000 0x20000000>;
 	};
 

+ 26 - 5
arch/arm/boot/dts/da850-evm.dts

@@ -94,6 +94,28 @@
 		regulator-boot-on;
 	};
 
+	baseboard_3v3: fixedregulator-3v3 {
+		/* TPS73701DCQ */
+		compatible = "regulator-fixed";
+		regulator-name = "baseboard_3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&vbat>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	baseboard_1v8: fixedregulator-1v8 {
+		/* TPS73701DCQ */
+		compatible = "regulator-fixed";
+		regulator-name = "baseboard_1v8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vbat>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
 	backlight_lcd: backlight-regulator {
 		compatible = "regulator-fixed";
 		regulator-name = "lcd_backlight_pwr";
@@ -105,7 +127,7 @@
 
 	sound {
 		compatible = "simple-audio-card";
-		simple-audio-card,name = "DA850/OMAP-L138 EVM";
+		simple-audio-card,name = "DA850-OMAPL138 EVM";
 		simple-audio-card,widgets =
 			"Line", "Line In",
 			"Line", "Line Out";
@@ -210,10 +232,9 @@
 
 		/* Regulators */
 		IOVDD-supply = <&vdcdc2_reg>;
-		/* Derived from VBAT: Baseboard 3.3V / 1.8V */
-		AVDD-supply = <&vbat>;
-		DRVDD-supply = <&vbat>;
-		DVDD-supply = <&vbat>;
+		AVDD-supply = <&baseboard_3v3>;
+		DRVDD-supply = <&baseboard_3v3>;
+		DVDD-supply = <&baseboard_1v8>;
 	};
 	tca6416: gpio@20 {
 		compatible = "ti,tca6416";

+ 1 - 1
arch/arm/boot/dts/da850.dtsi

@@ -478,7 +478,7 @@
 		clocksource: timer@20000 {
 			compatible = "ti,da830-timer";
 			reg = <0x20000 0x1000>;
-			interrupts = <12>, <13>;
+			interrupts = <21>, <22>;
 			interrupt-names = "tint12", "tint34";
 			clocks = <&pll0_auxclk>;
 		};

+ 6 - 10
arch/arm/boot/dts/gemini-dlink-dir-685.dts

@@ -274,20 +274,16 @@
 				read-only;
 			};
 			/*
-			 * Between the boot loader and the rootfs is the kernel
-			 * in a custom Storlink format flashed from the boot
-			 * menu. The rootfs is in squashfs format.
+			 * This firmware image contains the kernel catenated
+			 * with the squashfs root filesystem. For some reason
+			 * this is called "upgrade" on the vendor system.
 			 */
-			partition@1800c0 {
-				label = "rootfs";
-				reg = <0x001800c0 0x01dbff40>;
-				read-only;
-			};
-			partition@1f40000 {
+			partition@40000 {
 				label = "upgrade";
-				reg = <0x01f40000 0x00040000>;
+				reg = <0x00040000 0x01f40000>;
 				read-only;
 			};
+			/* RGDB, Residental Gateway Database? */
 			partition@1f80000 {
 				label = "rgdb";
 				reg = <0x01f80000 0x00040000>;

+ 13 - 4
arch/arm/boot/dts/imx51-zii-rdu1.dts

@@ -477,6 +477,15 @@
 };
 
 &gpio1 {
+	gpio-line-names = "", "", "", "",
+			  "", "", "", "",
+			  "", "hp-amp-shutdown-b", "", "",
+			  "", "", "", "",
+			  "", "", "", "",
+			  "", "", "", "",
+			  "", "", "", "",
+			  "", "", "", "";
+
 	unused-sd3-wp-gpio {
 		/*
 		 * See pinctrl_esdhc1 below for more details on this
@@ -501,9 +510,6 @@
 	hpa1: amp@60 {
 		compatible = "ti,tpa6130a2";
 		reg = <0x60>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_ampgpio>;
-		power-gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
 		Vdd-supply = <&reg_3p3v>;
 	};
 
@@ -677,7 +683,10 @@
 };
 
 &iomuxc {
-	pinctrl_ampgpio: ampgpiogrp {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hog>;
+
+	pinctrl_hog: hoggrp {
 		fsl,pins = <
 			MX51_PAD_GPIO1_9__GPIO1_9		0x5e
 		>;

+ 2 - 2
arch/arm/boot/dts/kirkwood-dnskw.dtsi

@@ -36,8 +36,8 @@
 		compatible = "gpio-fan";
 		pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
 		pinctrl-names = "default";
-		gpios = <&gpio1 14 GPIO_ACTIVE_LOW
-			 &gpio1 13 GPIO_ACTIVE_LOW>;
+		gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+			 &gpio1 13 GPIO_ACTIVE_HIGH>;
 		gpio-fan,speed-map = <0    0
 				      3000 1
 				      6000 2>;

+ 6 - 3
arch/arm/boot/dts/mmp2.dtsi

@@ -220,12 +220,15 @@
 				status = "disabled";
 			};
 
-			twsi2: i2c@d4025000 {
+			twsi2: i2c@d4031000 {
 				compatible = "mrvl,mmp-twsi";
-				reg = <0xd4025000 0x1000>;
-				interrupts = <58>;
+				reg = <0xd4031000 0x1000>;
+				interrupt-parent = <&intcmux17>;
+				interrupts = <0>;
 				clocks = <&soc_clocks MMP2_CLK_TWSI1>;
 				resets = <&soc_clocks MMP2_CLK_TWSI1>;
+				#address-cells = <1>;
+				#size-cells = <0>;
 				status = "disabled";
 			};
 

+ 1 - 0
arch/arm/boot/dts/omap4-sdp.dts

@@ -33,6 +33,7 @@
 		gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>;  /* gpio line 48 */
 		enable-active-high;
 		regulator-boot-on;
+		startup-delay-us = <25000>;
 	};
 
 	vbat: fixedregulator-vbat {

+ 6 - 3
arch/arm/boot/dts/omap5-board-common.dtsi

@@ -320,7 +320,8 @@
 
 	palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
 		pinctrl-single,pins = <
-			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+			/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
 		>;
 	};
 
@@ -388,7 +389,8 @@
 
 	palmas: palmas@48 {
 		compatible = "ti,palmas";
-		interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
+		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
 		reg = <0x48>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
@@ -654,7 +656,8 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&twl6040_pins>;
 
-		interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
+		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+		interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
 
 		/* audpwron gpio defined in the board specific dts */
 

+ 11 - 1
arch/arm/boot/dts/omap5-cm-t54.dts

@@ -181,6 +181,13 @@
 			OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6)  /* llib_wakereqin.gpio1_wk15 */
 		>;
 	};
+
+	palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+		pinctrl-single,pins = <
+			/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
+		>;
+	};
 };
 
 &omap5_pmx_core {
@@ -414,8 +421,11 @@
 
 	palmas: palmas@48 {
 		compatible = "ti,palmas";
-		interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
 		reg = <0x48>;
+		pinctrl-0 = <&palmas_sys_nirq_pins>;
+		pinctrl-names = "default";
+		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,system-power-controller;

+ 11 - 0
arch/arm/include/asm/assembler.h

@@ -467,6 +467,17 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm
 
+	.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+	sub	\tmp, \limit, #1
+	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
+	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
+	subhss	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
+	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
+	csdb
+#endif
+	.endm
+
 	.macro	uaccess_disable, tmp, isb=1
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/*

+ 1 - 0
arch/arm/include/asm/cputype.h

@@ -111,6 +111,7 @@
 #include <linux/kernel.h>
 
 extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
 
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)							\

+ 49 - 12
arch/arm/include/asm/proc-fns.h

@@ -23,7 +23,7 @@ struct mm_struct;
 /*
  * Don't change this structure - ASM code relies on it.
  */
-extern struct processor {
+struct processor {
 	/* MISC
 	 * get data abort address/flags
 	 */
@@ -79,9 +79,13 @@ extern struct processor {
 	unsigned int suspend_size;
 	void (*do_suspend)(void *);
 	void (*do_resume)(void *);
-} processor;
+};
 
 #ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
 extern void cpu_do_suspend(void *);
 extern void cpu_do_resume(void *);
 #else
-#define cpu_proc_init			processor._proc_init
-#define cpu_proc_fin			processor._proc_fin
-#define cpu_reset			processor.reset
-#define cpu_do_idle			processor._do_idle
-#define cpu_dcache_clean_area		processor.dcache_clean_area
-#define cpu_set_pte_ext			processor.set_pte_ext
-#define cpu_do_switch_mm		processor.switch_mm
 
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend			processor.do_suspend
-#define cpu_do_resume			processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised.  We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f)			cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f)			cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+	unsigned int cpu = smp_processor_id();
+	*cpu_vtable[cpu] = *p;
+	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+		     cpu_vtable[0]->dcache_clean_area);
+	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+		     cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f)			processor.f
+#define PROC_TABLE(f)			processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+	processor = *p;
+}
+#endif
+
+#define cpu_proc_init			PROC_VTABLE(_proc_init)
+#define cpu_check_bugs			PROC_VTABLE(check_bugs)
+#define cpu_proc_fin			PROC_VTABLE(_proc_fin)
+#define cpu_reset			PROC_VTABLE(reset)
+#define cpu_do_idle			PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area		PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext			PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm		PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend			PROC_VTABLE(do_suspend)
+#define cpu_do_resume			PROC_VTABLE(do_resume)
 #endif
 
 extern void cpu_resume(void);

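The proc-fns.h change above is the heart of the per-CPU vtable rework: instead of one global struct processor, each CPU gets its own table of function pointers, so big.LITTLE systems can install a different (hardened) switch_mm per core. A toy user-space model of that dispatch pattern follows; cpu_ops, the example implementations and the PROC_VTABLE-style macro here are illustrative only, not kernel API.

#include <stdio.h>

/* Minimal model of cpu_vtable[]/PROC_VTABLE() dispatch. */
struct cpu_ops {
	void (*switch_mm)(void);
};

static void switch_mm_plain(void)    { puts("plain switch_mm"); }
static void switch_mm_hardened(void) { puts("hardened switch_mm (BPIALL)"); }

#define NR_CPUS 4
static struct cpu_ops ops_plain    = { .switch_mm = switch_mm_plain };
static struct cpu_ops ops_hardened = { .switch_mm = switch_mm_hardened };

/* Each CPU slot may point at a different implementation. */
static struct cpu_ops *cpu_vtable[NR_CPUS] = {
	&ops_plain, &ops_plain, &ops_hardened, &ops_hardened,
};

#define PROC_VTABLE(cpu, f) (cpu_vtable[cpu]->f)

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		PROC_VTABLE(cpu, switch_mm)();	/* dispatch through this CPU's table */
	return 0;
}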
+ 2 - 2
arch/arm/include/asm/thread_info.h

@@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
 struct user_vfp;
 struct user_vfp_exc;
 
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
-					   struct user_vfp_exc __user *);
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+					   struct user_vfp_exc *);
 extern int vfp_restore_user_hwstate(struct user_vfp *,
 				    struct user_vfp_exc *);
 #endif

+ 43 - 6
arch/arm/include/asm/uaccess.h

@@ -69,6 +69,14 @@ extern int __put_user_bad(void);
 static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
+
+	/*
+	 * Prevent a mispredicted conditional call to set_fs from forwarding
+	 * the wrong address limit to access_ok under speculation.
+	 */
+	dsb(nsh);
+	isb();
+
 	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
 }
 
@@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs)
 #define __inttype(x) \
 	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
+/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size)			\
+	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+						    size_t size)
+{
+	void __user *safe_ptr = (void __user *)ptr;
+	unsigned long tmp;
+
+	asm volatile(
+	"	sub	%1, %3, #1\n"
+	"	subs	%1, %1, %0\n"
+	"	addhs	%1, %1, #1\n"
+	"	subhss	%1, %1, %2\n"
+	"	movlo	%0, #0\n"
+	: "+r" (safe_ptr), "=&r" (tmp)
+	: "r" (size), "r" (current_thread_info()->addr_limit)
+	: "cc");
+
+	csdb();
+	return safe_ptr;
+}
+
 /*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
@@ -362,6 +396,14 @@ do {									\
 	__pu_err;							\
 })
 
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
 #define __put_user(x, ptr)						\
 ({									\
 	long __pu_err = 0;						\
@@ -369,12 +411,6 @@ do {									\
 	__pu_err;							\
 })
 
-#define __put_user_error(x, ptr, err)					\
-({									\
-	__put_user_switch((x), (ptr), (err), __put_user_nocheck);	\
-	(void) 0;							\
-})
-
 #define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
 	do {								\
 		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
@@ -454,6 +490,7 @@ do {									\
 	: "r" (x), "i" (-EFAULT)				\
 	: "cc")
 
+#endif /* !CONFIG_CPU_SPECTRE */
 
 #ifdef CONFIG_MMU
 extern unsigned long __must_check

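The inline assembler in __uaccess_mask_range_ptr() above computes, branch-free, the range check modelled in plain C below. The helper name mask_user_ptr and its explicit limit parameter are illustrative (the kernel reads addr_limit itself); the real code has to use conditional instructions plus csdb rather than an if, so that a mispredicted branch can never hand an out-of-range pointer to the copy routines.

#include <stddef.h>
#include <stdint.h>

/*
 * C model of the Spectre-v1.1 pointer sanitisation: return NULL unless
 * the whole range [addr, addr + size) lies below limit.  The kernel
 * computes the same predicate with sub/subs/addhs/subhss/movlo + csdb.
 */
static void *mask_user_ptr(void *ptr, size_t size, uintptr_t limit)
{
	uintptr_t addr = (uintptr_t)ptr;

	if (addr > limit - 1)		/* start is already beyond the limit */
		return NULL;
	if (size > limit - addr)	/* end would run past the limit */
		return NULL;

	return ptr;
}

int main(void)
{
	char buf[16];
	uintptr_t limit = (uintptr_t)buf + sizeof(buf);

	return (mask_user_ptr(buf, 8, limit) == buf &&	/* in range: kept   */
		mask_user_ptr(buf, 32, limit) == NULL)	/* too long: NULLed */
		? 0 : 1;
}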
+ 2 - 2
arch/arm/kernel/bugs.c

@@ -6,8 +6,8 @@
 void check_other_bugs(void)
 {
 #ifdef MULTI_CPU
-	if (processor.check_bugs)
-		processor.check_bugs();
+	if (cpu_check_bugs)
+		cpu_check_bugs();
 #endif
 }
 

+ 3 - 3
arch/arm/kernel/head-common.S

@@ -145,6 +145,9 @@ __mmap_switched_data:
 #endif
 	.size	__mmap_switched_data, . - __mmap_switched_data
 
+	__FINIT
+	.text
+
 /*
  * This provides a C-API version of __lookup_processor_type
  */
@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
 	ldmfd	sp!, {r4 - r6, r9, pc}
 ENDPROC(lookup_processor_type)
 
-	__FINIT
-	.text
-
 /*
  * Read processor ID register (CP#15, CR0), and look up in the linker-built
  * supported processor list.  Note that we can't use the absolute addresses

+ 25 - 15
arch/arm/kernel/setup.c

@@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2);
 
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+	[0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void)
 }
 #endif
 
-static void __init setup_processor(void)
+/*
+ * locate processor in the list of supported processor types.  The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
 {
-	struct proc_info_list *list;
+	struct proc_info_list *list = lookup_processor_type(midr);
 
-	/*
-	 * locate processor in the list of supported processor
-	 * types.  The linker builds this table for us from the
-	 * entries in arch/arm/mm/proc-*.S
-	 */
-	list = lookup_processor_type(read_cpuid_id());
 	if (!list) {
-		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
-		       read_cpuid_id());
-		while (1);
+		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+		       smp_processor_id(), midr);
+		while (1)
+		/* can't use cpu_relax() here as it may require MMU setup */;
 	}
 
+	return list;
+}
+
+static void __init setup_processor(void)
+{
+	unsigned int midr = read_cpuid_id();
+	struct proc_info_list *list = lookup_processor(midr);
+
 	cpu_name = list->cpu_name;
 	__cpu_architecture = __get_cpu_architecture();
 
-#ifdef MULTI_CPU
-	processor = *list->proc;
-#endif
+	init_proc_vtable(list->proc);
 #ifdef MULTI_TLB
 	cpu_tlb = *list->tlb;
 #endif
@@ -700,7 +710,7 @@ static void __init setup_processor(void)
 #endif
 
 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+		list->cpu_name, midr, midr & 15,
 		proc_arch[cpu_architecture()], get_cr());
 
 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",

+ 44 - 36
arch/arm/kernel/signal.c

@@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
 		kframe->magic = IWMMXT_MAGIC;
 		kframe->size = IWMMXT_STORAGE_SIZE;
 		iwmmxt_task_copy(current_thread_info(), &kframe->storage);
-
-		err = __copy_to_user(frame, kframe, sizeof(*frame));
 	} else {
 		/*
 		 * For bug-compatibility with older kernels, some space
@@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
 		 * Set the magic and size appropriately so that properly
 		 * written userspace can skip it reliably:
 		 */
-		__put_user_error(DUMMY_MAGIC, &frame->magic, err);
-		__put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+		*kframe = (struct iwmmxt_sigframe) {
+			.magic = DUMMY_MAGIC,
+			.size  = IWMMXT_STORAGE_SIZE,
+		};
 	}
 
+	err = __copy_to_user(frame, kframe, sizeof(*kframe));
+
 	return err;
 }
 
@@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp)
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	const unsigned long magic = VFP_MAGIC;
-	const unsigned long size = VFP_STORAGE_SIZE;
+	struct vfp_sigframe kframe;
 	int err = 0;
 
-	__put_user_error(magic, &frame->magic, err);
-	__put_user_error(size, &frame->size, err);
+	memset(&kframe, 0, sizeof(kframe));
+	kframe.magic = VFP_MAGIC;
+	kframe.size = VFP_STORAGE_SIZE;
 
+	err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
 	if (err)
-		return -EFAULT;
+		return err;
 
-	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+	return __copy_to_user(frame, &kframe, sizeof(kframe));
 }
 
 static int restore_vfp_context(char __user **auxp)
@@ -288,30 +291,35 @@ static int
 setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 {
 	struct aux_sigframe __user *aux;
+	struct sigcontext context;
 	int err = 0;
 
-	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
-	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
-	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
-	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
-	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
-	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
-	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
-	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
-	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
-	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
-	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
-	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
-	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
-	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
-	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
-	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
-	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
-
-	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
-	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
-	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
-	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+	context = (struct sigcontext) {
+		.arm_r0        = regs->ARM_r0,
+		.arm_r1        = regs->ARM_r1,
+		.arm_r2        = regs->ARM_r2,
+		.arm_r3        = regs->ARM_r3,
+		.arm_r4        = regs->ARM_r4,
+		.arm_r5        = regs->ARM_r5,
+		.arm_r6        = regs->ARM_r6,
+		.arm_r7        = regs->ARM_r7,
+		.arm_r8        = regs->ARM_r8,
+		.arm_r9        = regs->ARM_r9,
+		.arm_r10       = regs->ARM_r10,
+		.arm_fp        = regs->ARM_fp,
+		.arm_ip        = regs->ARM_ip,
+		.arm_sp        = regs->ARM_sp,
+		.arm_lr        = regs->ARM_lr,
+		.arm_pc        = regs->ARM_pc,
+		.arm_cpsr      = regs->ARM_cpsr,
+
+		.trap_no       = current->thread.trap_no,
+		.error_code    = current->thread.error_code,
+		.fault_address = current->thread.address,
+		.oldmask       = set->sig[0],
+	};
+
+	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
 
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
 
@@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 	if (err == 0)
 		err |= preserve_vfp_context(&aux->vfp);
 #endif
-	__put_user_error(0, &aux->end_magic, err);
+	err |= __put_user(0, &aux->end_magic);
 
 	return err;
 }
@@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	/*
 	 * Set uc.uc_flags to a value which sc.trap_no would never have.
 	 */
-	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+	err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
 
 	err |= setup_sigframe(frame, regs, set);
 	if (err == 0)
@@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 
 	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 
-	__put_user_error(0, &frame->sig.uc.uc_flags, err);
-	__put_user_error(NULL, &frame->sig.uc.uc_link, err);
+	err |= __put_user(0, &frame->sig.uc.uc_flags);
+	err |= __put_user(NULL, &frame->sig.uc.uc_link);
 
 	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
 	err |= setup_sigframe(&frame->sig, regs, set);

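The signal.c changes above all follow one pattern: assemble the whole frame in a kernel-side structure first, then transfer it with a single __copy_to_user(), rather than issuing many __put_user_error() stores (which this series removes). A stand-alone illustration of that refactoring pattern is below; struct sigcontext_model and copy_out() are stand-ins, not kernel definitions.

#include <stdio.h>
#include <string.h>

struct sigcontext_model {
	unsigned long r0, r1, sp, pc;
	unsigned long trap_no, oldmask;
};

/* Stand-in for __copy_to_user(): one bulk copy, returns 0 on success. */
static int copy_out(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	struct sigcontext_model user_frame;	/* pretend this is user memory */

	/* Build the complete context locally, with designated initializers ... */
	struct sigcontext_model context = {
		.r0      = 1,
		.r1      = 2,
		.sp      = 0xbeef0000UL,
		.pc      = 0x8000UL,
		.trap_no = 14,
		.oldmask = 0,
	};

	/* ... then push it out in one range-checked operation. */
	int err = copy_out(&user_frame, &context, sizeof(context));

	printf("copied frame, err=%d, pc=%#lx\n", err, user_frame.pc);
	return err;
}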
+ 46 - 0
arch/arm/kernel/smp.c

@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 #endif
 }
 
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	if (!cpu_vtable[cpu])
+		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+	return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	if (!smp_ops.smp_boot_secondary)
 		return -ENOSYS;
 
+	ret = secondary_biglittle_prepare(cpu);
+	if (ret)
+		return ret;
+
 	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
 
+	secondary_biglittle_init();
+
 	/*
 	 * The identity mapping is uncached (strongly ordered), so
 	 * switch away from it before attempting any exclusive accesses.
@@ -693,6 +724,21 @@ void smp_send_stop(void)
 		pr_warn("SMP: failed to stop secondary CPUs\n");
 }
 
+/* In case panic() and panic() called at the same time on CPU1 and CPU2,
+ * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop()
+ * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online,
+ * kdump fails. So split out the panic_smp_self_stop() and add
+ * set_cpu_online(smp_processor_id(), false).
+ */
+void panic_smp_self_stop(void)
+{
+	pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+	         smp_processor_id());
+	set_cpu_online(smp_processor_id(), false);
+	while (1)
+		cpu_relax();
+}
+
 /*
  * not supported here
  */

+ 6 - 2
arch/arm/kernel/sys_oabi-compat.c

@@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 				    int maxevents, int timeout)
 {
 	struct epoll_event *kbuf;
+	struct oabi_epoll_event e;
 	mm_segment_t fs;
 	long ret, err, i;
 
@@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 	set_fs(fs);
 	err = 0;
 	for (i = 0; i < ret; i++) {
-		__put_user_error(kbuf[i].events, &events->events, err);
-		__put_user_error(kbuf[i].data,   &events->data,   err);
+		e.events = kbuf[i].events;
+		e.data = kbuf[i].data;
+		err = __copy_to_user(events, &e, sizeof(e));
+		if (err)
+			break;
 		events++;
 	}
 	kfree(kbuf);

+ 1 - 5
arch/arm/lib/copy_from_user.S

@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
 #ifdef CONFIG_CPU_SPECTRE
 	get_thread_info r3
 	ldr	r3, [r3, #TI_ADDR_LIMIT]
-	adds	ip, r1, r2	@ ip=addr+size
-	sub	r3, r3, #1	@ addr_limit - 1
-	cmpcc	ip, r3		@ if (addr+size > addr_limit - 1)
-	movcs	r1, #0		@ addr = NULL
-	csdb
+	uaccess_mask_range_ptr r1, r2, r3, ip
 #endif
 
 #include "copy_template.S"

+ 5 - 1
arch/arm/lib/copy_to_user.S

@@ -94,6 +94,11 @@
 
 ENTRY(__copy_to_user_std)
 WEAK(arm_copy_to_user)
+#ifdef CONFIG_CPU_SPECTRE
+	get_thread_info r3
+	ldr	r3, [r3, #TI_ADDR_LIMIT]
+	uaccess_mask_range_ptr r0, r2, r3, ip
+#endif
 
 #include "copy_template.S"
 
@@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
 	rsb	r0, r0, r2
 	copy_abort_end
 	.popsection
-

+ 2 - 1
arch/arm/lib/uaccess_with_memcpy.c

@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 		n = __copy_to_user_std(to, from, n);
 		uaccess_restore(ua_flags);
 	} else {
-		n = __copy_to_user_memcpy(to, from, n);
+		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
+					  from, n);
 	}
 	return n;
 }

+ 1 - 1
arch/arm/mach-cns3xxx/pcie.c

@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
 	} else /* remote PCI bus */
 		base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
 
-	return base + (where & 0xffc) + (devfn << 12);
+	return base + where + (devfn << 12);
 }
 
 static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,

+ 5 - 1
arch/arm/mach-integrator/impd1.c

@@ -393,7 +393,11 @@ static int __ref impd1_probe(struct lm_device *dev)
 					      sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
 					      GFP_KERNEL);
 			chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
-			mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
+			mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
+						  "lm%x:00700", dev->id);
+			if (!lookup || !chipname || !mmciname)
+				return -ENOMEM;
+
 			lookup->dev_id = mmciname;
 			/*
 			 * Offsets on GPIO block 1:

+ 1 - 2
arch/arm/mach-iop32x/n2100.c

@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
 /*
  * N2100 PCI.
 */
-static int __init
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	int irq;
 

+ 35 - 1
arch/arm/mach-omap2/omap-wakeupgen.c

@@ -50,6 +50,9 @@
 #define OMAP4_NR_BANKS		4
 #define OMAP4_NR_IRQS		128
 
+#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
+#define SYS_NIRQ2_EXT_SYS_IRQ_2	119
+
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
 	irq_chip_unmask_parent(d);
 }
 
+/*
+ * The sys_nirq pins bypass peripheral modules and are wired directly
+ * to MPUSS wakeupgen. They get automatically inverted for GIC.
+ */
+static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	bool inverted = false;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_LOW:
+		type &= ~IRQ_TYPE_LEVEL_MASK;
+		type |= IRQ_TYPE_LEVEL_HIGH;
+		inverted = true;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		type &= ~IRQ_TYPE_EDGE_BOTH;
+		type |= IRQ_TYPE_EDGE_RISING;
+		inverted = true;
+		break;
+	default:
+		break;
+	}
+
+	if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
+	    d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
+		pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
+			d->hwirq);
+
+	return irq_chip_set_type_parent(d, type);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
 
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
 	.irq_mask		= wakeupgen_mask,
 	.irq_unmask		= wakeupgen_unmask,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
-	.irq_set_type		= irq_chip_set_type_parent,
+	.irq_set_type		= wakeupgen_irq_set_type,
 	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,

+ 3 - 3
arch/arm/mach-omap2/omap_hwmod.c

@@ -2423,7 +2423,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
 * a stub; implementing this properly requires iclk autoidle usecounting in
 * the clock code.   No return value.
 */
-static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
+static void _setup_iclk_autoidle(struct omap_hwmod *oh)
 {
 	struct omap_hwmod_ocp_if *os;
 
@@ -2454,7 +2454,7 @@ static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
 * reset.  Returns 0 upon success or a negative error code upon
 * failure.
 */
-static int __init _setup_reset(struct omap_hwmod *oh)
+static int _setup_reset(struct omap_hwmod *oh)
 {
 	int r;
 
@@ -2515,7 +2515,7 @@ static int __init _setup_reset(struct omap_hwmod *oh)
 *
 * No return value.
 */
-static void __init _setup_postsetup(struct omap_hwmod *oh)
+static void _setup_postsetup(struct omap_hwmod *oh)
 {
 	u8 postsetup_state;
 

+ 1 - 1
arch/arm/mach-pxa/cm-x300.c

@@ -558,7 +558,7 @@ static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = {
 	.exit		= cm_x300_u2d_exit,
 };
 
-static void cm_x300_init_u2d(void)
+static void __init cm_x300_init_u2d(void)
 {
 	pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data);
 }

+ 1 - 1
arch/arm/mach-pxa/littleton.c

@@ -184,7 +184,7 @@ static struct pxafb_mach_info littleton_lcd_info = {
 	.lcd_conn		= LCD_COLOR_TFT_16BPP,
 };
 
-static void littleton_init_lcd(void)
+static void __init littleton_init_lcd(void)
 {
 	pxa_set_fb_info(NULL, &littleton_lcd_info);
 }

+ 1 - 1
arch/arm/mach-pxa/zeus.c

@@ -559,7 +559,7 @@ static struct pxaohci_platform_data zeus_ohci_platform_data = {
 	.flags		= ENABLE_PORT_ALL | POWER_SENSE_LOW,
 };
 
-static void zeus_register_ohci(void)
+static void __init zeus_register_ohci(void)
 {
 	/* Port 2 is shared between host and client interface. */
 	UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;

+ 2 - 4
arch/arm/mach-tango/pm.c

@@ -3,6 +3,7 @@
 #include <linux/suspend.h>
 #include <asm/suspend.h>
 #include "smc.h"
+#include "pm.h"
 
 static int tango_pm_powerdown(unsigned long arg)
 {
@@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
 	.valid = suspend_valid_only_mem,
 };
 
-static int __init tango_pm_init(void)
+void __init tango_pm_init(void)
 {
 	suspend_set_ops(&tango_pm_ops);
-	return 0;
 }
-
-late_initcall(tango_pm_init);

+ 7 - 0
arch/arm/mach-tango/pm.h

@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_SUSPEND
+void __init tango_pm_init(void);
+#else
+#define tango_pm_init NULL
+#endif

+ 2 - 0
arch/arm/mach-tango/setup.c

@@ -2,6 +2,7 @@
 #include <asm/mach/arch.h>
 #include <asm/hardware/cache-l2x0.h>
 #include "smc.h"
+#include "pm.h"
 
 static void tango_l2c_write(unsigned long val, unsigned int reg)
 {
@@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
 	.dt_compat	= tango_dt_compat,
 	.l2c_aux_mask	= ~0,
 	.l2c_write_sec	= tango_l2c_write,
+	.init_late	= tango_pm_init,
 MACHINE_END

+ 10 - 0
arch/arm/mm/proc-macros.S

@@ -274,6 +274,13 @@
 	.endm
 
 .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+/*
+ * If we are building for big.Little with branch predictor hardening,
+ * we need the processor function tables to remain available after boot.
+ */
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+	.section ".rodata"
+#endif
 	.type	\name\()_processor_functions, #object
 	.align 2
 ENTRY(\name\()_processor_functions)
@@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
 	.endif
 
 	.size	\name\()_processor_functions, . - \name\()_processor_functions
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+	.previous
+#endif
 .endm
 
 .macro define_cache_functions name:req

+ 2 - 15
arch/arm/mm/proc-v7-bugs.c

@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
 	case ARM_CPU_PART_CORTEX_A17:
 	case ARM_CPU_PART_CORTEX_A73:
 	case ARM_CPU_PART_CORTEX_A75:
-		if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_bpiall;
 		spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
 
 	case ARM_CPU_PART_CORTEX_A15:
 	case ARM_CPU_PART_BRAHMA_B15:
-		if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_iciallu;
 		spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 			if ((int)res.a0 != 0)
 				break;
-			if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
-				goto bl_error;
 			per_cpu(harden_branch_predictor_fn, cpu) =
 				call_hvc_arch_workaround_1;
-			processor.switch_mm = cpu_v7_hvc_switch_mm;
+			cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
 			spectre_v2_method = "hypervisor";
 			break;
 
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 			if ((int)res.a0 != 0)
 				break;
-			if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
-				goto bl_error;
 			per_cpu(harden_branch_predictor_fn, cpu) =
 				call_smc_arch_workaround_1;
-			processor.switch_mm = cpu_v7_smc_switch_mm;
+			cpu_do_switch_mm = cpu_v7_smc_switch_mm;
 			spectre_v2_method = "firmware";
 			break;
 
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
 	if (spectre_v2_method)
 		pr_info("CPU%u: Spectre v2: using %s workaround\n",
 			smp_processor_id(), spectre_v2_method);
-	return;
-
-bl_error:
-	pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
-		cpu);
 }
 #else
 static void cpu_v7_spectre_init(void)

+ 8 - 12
arch/arm/vfp/vfpmodule.c

@@ -553,12 +553,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
 * Save the current VFP state into the provided structures and prepare
 * for entry into a new function (signal handler).
 */
-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
-				    struct user_vfp_exc __user *ufp_exc)
+int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
+				    struct user_vfp_exc *ufp_exc)
 {
 	struct thread_info *thread = current_thread_info();
 	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
-	int err = 0;
 
 	/* Ensure that the saved hwstate is up-to-date. */
 	vfp_sync_hwstate(thread);
@@ -567,22 +566,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
 	 * Copy the floating point registers. There can be unused
 	 * registers see asm/hwcap.h for details.
 	 */
-	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
-			      sizeof(hwstate->fpregs));
+	memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
+
 	/*
 	 * Copy the status and control register.
 	 */
-	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+	ufp->fpscr = hwstate->fpscr;
 
 	/*
 	 * Copy the exception registers.
 	 */
-	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
-	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
-	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
-
-	if (err)
-		return -EFAULT;
+	ufp_exc->fpexc = hwstate->fpexc;
+	ufp_exc->fpinst = hwstate->fpinst;
+	ufp_exc->fpinst2 = hwstate->fpinst2;
 
 	/* Ensure that VFP is disabled. */
 	vfp_flush_hwstate(thread);

+ 17 - 0
arch/arm64/boot/dts/marvell/armada-ap806.dtsi

@@ -27,6 +27,23 @@
 		method = "smc";
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/*
+		 * This area matches the mapping done with a
+		 * mainline U-Boot, and should be updated by the
+		 * bootloader.
+		 */
+
+		psci-area@4000000 {
+			reg = <0x0 0x4000000 0x0 0x200000>;
+			no-map;
+		};
+	};
+
 	ap806 {
 		#address-cells = <2>;
 		#size-cells = <2>;

+ 18 - 12
arch/arm64/include/asm/assembler.h

@@ -378,27 +378,33 @@ alternative_endif
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
+	.macro __dcache_op_workaround_clean_cache, op, kaddr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	\op, \kaddr
+alternative_else
+	dc	civac, \kaddr
+alternative_endif
+	.endm
+
 	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
 	dcache_line_size \tmp1, \tmp2
 	add	\size, \kaddr, \size
 	sub	\tmp2, \tmp1, #1
 	bic	\kaddr, \kaddr, \tmp2
 9998:
-	.if	(\op == cvau || \op == cvac)
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
-	dc	\op, \kaddr
-alternative_else
-	dc	civac, \kaddr
-alternative_endif
-	.elseif	(\op == cvap)
-alternative_if ARM64_HAS_DCPOP
-	sys 3, c7, c12, 1, \kaddr	// dc cvap
-alternative_else
-	dc	cvac, \kaddr
-alternative_endif
+	.ifc	\op, cvau
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvac
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvap
+	sys	3, c7, c12, 1, \kaddr	// dc cvap
 	.else
 	dc	\op, \kaddr
 	.endif
+	.endif
+	.endif
 	add	\kaddr, \kaddr, \tmp1
 	cmp	\kaddr, \size
 	b.lo	9998b

+ 24 - 8
arch/arm64/include/asm/io.h

@@ -106,7 +106,23 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 }
 
 /* IO barriers */
-#define __iormb()		rmb()
+#define __iormb(v)							\
+({									\
+	unsigned long tmp;						\
+									\
+	rmb();								\
+									\
+	/*								\
+	 * Create a dummy control dependency from the IO read to any	\
+	 * later instructions. This ensures that a subsequent call to	\
+	 * udelay() will be ordered due to the ISB in get_cycles().	\
+	 */								\
+	asm volatile("eor	%0, %1, %1\n"				\
+		     "cbnz	%0, ."					\
+		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
+		     : "memory");					\
+})
+
 #define __iowmb()		wmb()
 
 #define mmiowb()		do { } while (0)
@@ -131,10 +147,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 * following Normal memory access. Writes are ordered relative to any prior
 * Normal memory access.
 */
-#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
-#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
-#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
-#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(); __v; })
+#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(__v); __v; })
+#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
+#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
+#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
 
 #define writeb(v,c)		({ __iowmb(); writeb_relaxed((v),(c)); })
 #define writew(v,c)		({ __iowmb(); writew_relaxed((v),(c)); })
@@ -185,9 +201,9 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 /*
  * io{read,write}{16,32,64}be() macros
  */
-#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
-#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
+#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; })
+#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; })
+#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; })
 
 #define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
 #define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
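Note (an illustration, not part of the diff): the new __iormb(v) consumes the value returned by the I/O read, so a delay that follows is guaranteed to start only after the read has actually completed. A minimal, hypothetical driver-style sketch of the pattern this protects (the register offset and status bit are assumptions):

    #include <linux/io.h>
    #include <linux/delay.h>

    /* Hypothetical reset sequence: read a status register, then wait. */
    static void example_reset_wait(void __iomem *base)
    {
    	u32 status = readl(base + 0x04);	/* read must complete first */

    	if (status & 0x1)
    		udelay(10);			/* ordered after the read by __iormb(__v) */
    }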

+ 3 - 0
arch/arm64/include/asm/kvm_arm.h

@@ -24,6 +24,8 @@
 
 /* Hyp Configuration Register (HCR) bits */
 #define HCR_FWB		(UL(1) << 46)
+#define HCR_API		(UL(1) << 41)
+#define HCR_APK		(UL(1) << 40)
 #define HCR_TEA		(UL(1) << 37)
 #define HCR_TERR	(UL(1) << 36)
 #define HCR_TLOR	(UL(1) << 35)
@@ -87,6 +89,7 @@
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 			 HCR_FMO | HCR_IMO)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* TCR_EL2 Registers bits */

+ 6 - 1
arch/arm64/include/asm/memory.h

@@ -76,12 +76,17 @@
 /*
  * KASAN requires 1/8th of the kernel virtual address space for the shadow
  * region. KASAN can bloat the stack significantly, so double the (minimum)
- * stack size when KASAN is in use.
+ * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
+ * on.
  */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SCALE_SHIFT 3
 #define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_THREAD_SHIFT	2
+#else
 #define KASAN_THREAD_SHIFT	1
+#endif /* CONFIG_KASAN_EXTRA */
 #else
 #define KASAN_SHADOW_SIZE	(0)
 #define KASAN_THREAD_SHIFT	0
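For context (an illustration, not part of the patch): arm64 normally derives its kernel stack size from a minimum shift of 14 (16 KiB), so KASAN doubles that and KASAN_EXTRA doubles it again. A stand-alone sketch of the arithmetic, assuming that base shift:

    #include <stdio.h>

    int main(void)
    {
    	const char *cfg[] = { "no KASAN", "KASAN", "KASAN_EXTRA" };
    	int kasan_thread_shift[] = { 0, 1, 2 };

    	/* assumed relation: MIN_THREAD_SHIFT = 14 + KASAN_THREAD_SHIFT */
    	for (int i = 0; i < 3; i++)
    		printf("%-11s -> %lu KiB stacks\n", cfg[i],
    		       (1UL << (14 + kasan_thread_shift[i])) / 1024);
    	return 0;
    }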

+ 1 - 1
arch/arm64/include/uapi/asm/ptrace.h

@@ -130,7 +130,7 @@ struct user_sve_header {
 
 /* Offset from the start of struct user_sve_header to the register data */
 #define SVE_PT_REGS_OFFSET					\
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
+	((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1))	\
 		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
 
 /*

+ 0 - 1
arch/arm64/kernel/entry-ftrace.S

@@ -79,7 +79,6 @@
 	.macro mcount_get_lr reg
 	ldr	\reg, [x29]
 	ldr	\reg, [\reg, #8]
-	mcount_adjust_addr	\reg, \reg
 	.endm
 
 	.macro mcount_get_lr_addr reg

+ 2 - 3
arch/arm64/kernel/head.S

@@ -494,10 +494,9 @@ ENTRY(el2_setup)
 #endif
 
 	/* Hyp configuration. */
-	mov	x0, #HCR_RW			// 64-bit EL1
+	mov_q	x0, HCR_HOST_NVHE_FLAGS
 	cbz	x2, set_hcr
-	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
-	orr	x0, x0, #HCR_E2H
+	mov_q	x0, HCR_HOST_VHE_FLAGS
 set_hcr:
 	msr	hcr_el2, x0
 	isb

+ 3 - 1
arch/arm64/kernel/hibernate.c

@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
 		dcache_clean_range(__idmap_text_start, __idmap_text_end);
 
 		/* Clean kvm setup code to PoC? */
-		if (el2_reset_needed())
+		if (el2_reset_needed()) {
 			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+			dcache_clean_range(__hyp_text_start, __hyp_text_end);
+		}
 
 		/* make the crash dump kernel image protected again */
 		crash_post_resume();

+ 2 - 0
arch/arm64/kernel/hyp-stub.S

@@ -28,6 +28,8 @@
 #include <asm/virt.h>
 
 	.text
+	.pushsection	.hyp.text, "ax"
+
 	.align 11
 
 ENTRY(__hyp_stub_vectors)

+ 7 - 2
arch/arm64/kernel/kaslr.c

@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/types.h>
 
+#include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
 	return ret;
 }
 
-static __init const u8 *get_cmdline(void *fdt)
+static __init const u8 *kaslr_get_cmdline(void *fdt)
 {
 	static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
 
@@ -87,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * we end up running with module randomization disabled.
 	 */
 	module_alloc_base = (u64)_etext - MODULES_VSIZE;
+	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
 
 	/*
 	 * Try to map the FDT early. If this fails, we simply bail,
@@ -109,7 +111,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * Check if 'nokaslr' appears on the command line, and
 	 * return 0 if that is the case.
 	 */
-	cmdline = get_cmdline(fdt);
+	cmdline = kaslr_get_cmdline(fdt);
 	str = strstr(cmdline, "nokaslr");
 	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
 		return 0;
@@ -169,5 +171,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
 	module_alloc_base &= PAGE_MASK;
 
+	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+	__flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
 	return offset;
 }

+ 1 - 0
arch/arm64/kernel/perf_event.c

@@ -1274,6 +1274,7 @@ static struct platform_driver armv8_pmu_driver = {
 	.driver		= {
 		.name	= ARMV8_PMU_PDEV_NAME,
 		.of_match_table = armv8_pmu_of_device_ids,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= armv8_pmu_device_probe,
 };

+ 4 - 5
arch/arm64/kernel/sys_compat.c

@@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
 /*
  * Handle all unrecognised system calls.
  */
-long compat_arm_syscall(struct pt_regs *regs)
+long compat_arm_syscall(struct pt_regs *regs, int scno)
 {
 	siginfo_t info;
-	unsigned int no = regs->regs[7];
 
-	switch (no) {
+	switch (scno) {
 	/*
 	 * Flush a region from virtual address 'r0' to virtual address 'r1'
 	 * _exclusive_.  There is no alignment requirement on either address;
@@ -107,7 +106,7 @@ long compat_arm_syscall(struct pt_regs *regs)
 		 * way the calling program can gracefully determine whether
 		 * a feature is supported.
 		 */
-		if (no < __ARM_NR_COMPAT_END)
+		if (scno < __ARM_NR_COMPAT_END)
 			return -ENOSYS;
 		break;
 	}
@@ -119,6 +118,6 @@ long compat_arm_syscall(struct pt_regs *regs)
 	info.si_addr  = (void __user *)instruction_pointer(regs) -
 			 (compat_thumb_mode(regs) ? 2 : 4);
 
-	arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no);
+	arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, scno);
 	return 0;
 }

+ 4 - 5
arch/arm64/kernel/syscall.c

@@ -13,16 +13,15 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
-long compat_arm_syscall(struct pt_regs *regs);
-
+long compat_arm_syscall(struct pt_regs *regs, int scno);
 long sys_ni_syscall(void);
 
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
+static long do_ni_syscall(struct pt_regs *regs, int scno)
 {
 #ifdef CONFIG_COMPAT
 	long ret;
 	if (is_compat_task()) {
-		ret = compat_arm_syscall(regs);
+		ret = compat_arm_syscall(regs, scno);
 		if (ret != -ENOSYS)
 			return ret;
 	}
@@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
 		ret = __invoke_syscall(regs, syscall_fn);
 	} else {
-		ret = do_ni_syscall(regs);
+		ret = do_ni_syscall(regs, scno);
 	}
 
 	regs->regs[0] = ret;

+ 1 - 1
arch/arm64/kvm/hyp/switch.c

@@ -157,7 +157,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 
 	write_sysreg(mdcr_el2, mdcr_el2);
-	write_sysreg(HCR_RW, hcr_el2);
+	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
 	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
 

+ 3 - 0
arch/arm64/mm/cache.S

@@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area)
 *	- size    - size in question
 */
 ENTRY(__clean_dcache_area_pop)
+	alternative_if_not ARM64_HAS_DCPOP
+	b	__clean_dcache_area_poc
+	alternative_else_nop_endif
 	dcache_by_line_op cvap, sy, x0, x1, x2, x3
 	ret
 ENDPIPROC(__clean_dcache_area_pop)

+ 5 - 1
arch/arm64/mm/flush.c

@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
 		__clean_dcache_area_pou(kaddr, len);
 		__flush_icache_all();
 	} else {
-		flush_icache_range(addr, addr + len);
+		/*
+		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
+		 * for user mappings.
+		 */
+		__flush_icache_range(addr, addr + len);
 	}
 }
 
 

+ 4 - 0
arch/mips/Kconfig

@@ -794,6 +794,7 @@ config SIBYTE_SWARM
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select ZONE_DMA32 if 64BIT
+	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SIBYTE_LITTLESUR
 	bool "Sibyte BCM91250C2-LittleSur"
@@ -814,6 +815,7 @@ config SIBYTE_SENTOSA
 	select SYS_HAS_CPU_SB1
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SIBYTE_BIGSUR
 	bool "Sibyte BCM91480B-BigSur"
@@ -826,6 +828,7 @@ config SIBYTE_BIGSUR
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select ZONE_DMA32 if 64BIT
+	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SNI_RM
 	bool "SNI RM200/300/400"
@@ -3149,6 +3152,7 @@ config MIPS32_O32
 config MIPS32_N32
 	bool "Kernel support for n32 binaries"
 	depends on 64BIT
+	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select COMPAT
 	select MIPS32_COMPAT
 	select SYSVIPC_COMPAT if SYSVIPC

+ 31 - 0
arch/mips/bcm47xx/setup.c

@@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
 	pm_power_off = bcm47xx_machine_halt;
 }
 
+#ifdef CONFIG_BCM47XX_BCMA
+static struct device * __init bcm47xx_setup_device(void)
+{
+	struct device *dev;
+	int err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	err = dev_set_name(dev, "bcm47xx_soc");
+	if (err) {
+		pr_err("Failed to set SoC device name: %d\n", err);
+		kfree(dev);
+		return NULL;
+	}
+
+	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (err)
+		pr_err("Failed to set SoC DMA mask: %d\n", err);
+
+	return dev;
+}
+#endif
+
 /*
  * This finishes bus initialization doing things that were not possible without
  * kmalloc. Make sure to call it late enough (after mm_init).
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
 	if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
 		int err;
 
+		bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
+		if (!bcm47xx_bus.bcma.dev)
+			panic("Failed to setup SoC device\n");
+
 		err = bcma_host_soc_init(&bcm47xx_bus.bcma);
 		if (err)
 			panic("Failed to initialize BCMA bus (err %d)", err);
@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
 	case BCM47XX_BUS_TYPE_BCMA:
+		if (device_register(bcm47xx_bus.bcma.dev))
+			pr_err("Failed to register SoC device\n");
 		bcma_bus_register(&bcm47xx_bus.bcma.bus);
 		break;
 #endif

+ 6 - 0
arch/mips/boot/dts/img/boston.dts

@@ -141,6 +141,12 @@
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
 
+				eg20t_phub@2,0,0 {
+					compatible = "pci8086,8801";
+					reg = <0x00020000 0 0 0 0>;
+					intel,eg20t-prefetch = <0>;
+				};
+
 				eg20t_mac@2,0,1 {
 					compatible = "pci8086,8802";
 					reg = <0x00020100 0 0 0 0>;

+ 0 - 2
arch/mips/include/asm/mach-jz4740/jz4740_mmc.h

@@ -4,8 +4,6 @@
 
 struct jz4740_mmc_platform_data {
 	int gpio_power;
-	int gpio_card_detect;
-	int gpio_read_only;
 	unsigned card_detect_active_low:1;
 	unsigned read_only_active_low:1;
 	unsigned power_active_low:1;

+ 1 - 1
arch/mips/include/uapi/asm/inst.h

@@ -369,8 +369,8 @@ enum mm_32a_minor_op {
 	mm_ext_op = 0x02c,
 	mm_pool32axf_op = 0x03c,
 	mm_srl32_op = 0x040,
+	mm_srlv32_op = 0x050,
 	mm_sra_op = 0x080,
-	mm_srlv32_op = 0x090,
 	mm_rotr_op = 0x0c0,
 	mm_lwxs_op = 0x118,
 	mm_addu32_op = 0x150,

+ 9 - 3
arch/mips/jz4740/board-qi_lb60.c

@@ -43,7 +43,6 @@
 #include "clock.h"
 
 /* GPIOs */
-#define QI_LB60_GPIO_SD_CD		JZ_GPIO_PORTD(0)
 #define QI_LB60_GPIO_SD_VCC_EN_N	JZ_GPIO_PORTD(2)
 
 #define QI_LB60_GPIO_KEYOUT(x)		(JZ_GPIO_PORTC(10) + (x))
@@ -386,12 +385,18 @@ static struct platform_device qi_lb60_gpio_keys = {
 };
 
 static struct jz4740_mmc_platform_data qi_lb60_mmc_pdata = {
-	.gpio_card_detect	= QI_LB60_GPIO_SD_CD,
-	.gpio_read_only		= -1,
 	.gpio_power		= QI_LB60_GPIO_SD_VCC_EN_N,
 	.power_active_low	= 1,
 };
 
+static struct gpiod_lookup_table qi_lb60_mmc_gpio_table = {
+	.dev_id = "jz4740-mmc.0",
+	.table = {
+		GPIO_LOOKUP("GPIOD", 0, "cd", GPIO_ACTIVE_HIGH),
+		{ },
+	},
+};
+
 /* beeper */
 static struct pwm_lookup qi_lb60_pwm_lookup[] = {
 	PWM_LOOKUP("jz4740-pwm", 4, "pwm-beeper", NULL, 0,
@@ -500,6 +505,7 @@ static int __init qi_lb60_init_platform_devices(void)
 	gpiod_add_lookup_table(&qi_lb60_audio_gpio_table);
 	gpiod_add_lookup_table(&qi_lb60_nand_gpio_table);
 	gpiod_add_lookup_table(&qi_lb60_spigpio_gpio_table);
+	gpiod_add_lookup_table(&qi_lb60_mmc_gpio_table);
 
 	spi_register_board_info(qi_lb60_spi_board_info,
 				ARRAY_SIZE(qi_lb60_spi_board_info));

+ 1 - 1
arch/mips/kernel/mips-cm.c

@@ -457,5 +457,5 @@ void mips_cm_error_report(void)
 	}
 
 	/* reprime cause register */
-	write_gcr_error_cause(0);
+	write_gcr_error_cause(cm_error);
 }

+ 5 - 63
arch/mips/lantiq/irq.c

@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
 	.irq_set_type = ltq_eiu_settype,
 };
 
-static void ltq_hw_irqdispatch(int module)
+static void ltq_hw_irq_handler(struct irq_desc *desc)
 {
+	int module = irq_desc_get_irq(desc) - 2;
 	u32 irq;
+	int hwirq;
 
 	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
 	if (irq == 0)
@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
 	 * other bits might be bogus
 	 */
 	irq = __fls(irq);
-	do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
+	hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+	generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
 
 	/* if this is a EBU irq, we need to ack it or get a deadlock */
 	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
 			LTQ_EBU_PCC_ISTAT);
 }
 
-#define DEFINE_HWx_IRQDISPATCH(x)					\
-	static void ltq_hw ## x ## _irqdispatch(void)			\
-	{								\
-		ltq_hw_irqdispatch(x);					\
-	}
-DEFINE_HWx_IRQDISPATCH(0)
-DEFINE_HWx_IRQDISPATCH(1)
-DEFINE_HWx_IRQDISPATCH(2)
-DEFINE_HWx_IRQDISPATCH(3)
-DEFINE_HWx_IRQDISPATCH(4)
-
-#if MIPS_CPU_TIMER_IRQ == 7
-static void ltq_hw5_irqdispatch(void)
-{
-	do_IRQ(MIPS_CPU_TIMER_IRQ);
-}
-#else
-DEFINE_HWx_IRQDISPATCH(5)
-#endif
-
-static void ltq_hw_irq_handler(struct irq_desc *desc)
-{
-	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
-	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-	int irq;
-
-	if (!pending) {
-		spurious_interrupt();
-		return;
-	}
-
-	pending >>= CAUSEB_IP;
-	while (pending) {
-		irq = fls(pending) - 1;
-		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
-		pending &= ~BIT(irq);
-	}
-}
-
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 {
 	struct irq_chip *chip = &ltq_irq_type;
@@ -343,28 +303,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 	for (i = 0; i < MAX_IM; i++)
 		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
 
-	if (cpu_has_vint) {
-		pr_info("Setting up vectored interrupts\n");
-		set_vi_handler(2, ltq_hw0_irqdispatch);
-		set_vi_handler(3, ltq_hw1_irqdispatch);
-		set_vi_handler(4, ltq_hw2_irqdispatch);
-		set_vi_handler(5, ltq_hw3_irqdispatch);
-		set_vi_handler(6, ltq_hw4_irqdispatch);
-		set_vi_handler(7, ltq_hw5_irqdispatch);
-	}
-
 	ltq_domain = irq_domain_add_linear(node,
 		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
 		&irq_domain_ops, 0);
 
-#ifndef CONFIG_MIPS_MT_SMP
-	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
-		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#else
-	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
-		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#endif
-
 	/* tell oprofile which irq to use */
 	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
 
 

+ 6 - 1
arch/mips/loongson64/common/reset.c

@@ -59,7 +59,12 @@ static void loongson_poweroff(void)
 {
 #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
 	mach_prepare_shutdown();
-	unreachable();
+
+	/*
+	 * It needs a wait loop here, but mips/kernel/reset.c already calls
+	 * a generic delay loop, machine_hang(), so simply return.
+	 */
+	return;
 #else
 	void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
 
 

+ 3 - 1
arch/mips/pci/msi-octeon.c

@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
 	int irq;
 	struct irq_chip *msi;
 
-	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+		return 0;
+	} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
 		msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
 		msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
 		msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;

+ 5 - 5
arch/mips/pci/pci-octeon.c

@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
 	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
 		return 0;
 
+	if (!octeon_is_pci_host()) {
+		pr_notice("Not in host mode, PCI Controller not initialized\n");
+		return 0;
+	}
+
 	/* Point pcibios_map_irq() to the PCI version of it */
 	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
 
@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
 	else
 		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
 
-	if (!octeon_is_pci_host()) {
-		pr_notice("Not in host mode, PCI Controller not initialized\n");
-		return 0;
-	}
-
 	/* PCI I/O and PCI MEM values */
 	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
 	ioport_resource.start = 0;

+ 1 - 0
arch/mips/ralink/Kconfig

@@ -38,6 +38,7 @@ choice
 
 	config SOC_MT7620
 		bool "MT7620/8"
+		select CPU_MIPSR2_IRQ_VI
 		select HW_HAS_PCI
 
 	config SOC_MT7621

+ 1 - 0
arch/mips/sibyte/common/Makefile

@@ -1,4 +1,5 @@
 obj-y := cfe.o
+obj-$(CONFIG_SWIOTLB)			+= dma.o
 obj-$(CONFIG_SIBYTE_BUS_WATCHER)	+= bus_watcher.o
 obj-$(CONFIG_SIBYTE_CFE_CONSOLE)	+= cfe_console.o
 obj-$(CONFIG_SIBYTE_TBPROF)		+= sb_tbprof.o

+ 14 - 0
arch/mips/sibyte/common/dma.c

@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *	DMA support for Broadcom SiByte platforms.
+ *
+ *	Copyright (c) 2018  Maciej W. Rozycki
+ */
+
+#include <linux/swiotlb.h>
+#include <asm/bootinfo.h>
+
+void __init plat_swiotlb_setup(void)
+{
+	swiotlb_init(1);
+}

+ 3 - 2
arch/mips/vdso/Makefile

@@ -8,6 +8,7 @@ ccflags-vdso := \
 	$(filter -E%,$(KBUILD_CFLAGS)) \
 	$(filter -mmicromips,$(KBUILD_CFLAGS)) \
 	$(filter -march=%,$(KBUILD_CFLAGS)) \
+	$(filter -m%-float,$(KBUILD_CFLAGS)) \
 	-D__VDSO__
 
 ifeq ($(cc-name),clang)
@@ -128,7 +129,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
 
@@ -168,7 +169,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
 
 

+ 5 - 1
arch/nds32/mm/Makefile

@@ -4,4 +4,8 @@ obj-y				:= extable.o tlb.o \
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_HIGHMEM)           += highmem.o
-CFLAGS_proc-n13.o		+= -fomit-frame-pointer
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_proc.o     = $(CC_FLAGS_FTRACE)
+endif
+CFLAGS_proc.o              += -fomit-frame-pointer

+ 7 - 15
arch/powerpc/include/asm/book3s/64/pgtable.h

@@ -1234,21 +1234,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 
 #define pmd_move_must_withdraw pmd_move_must_withdraw
 struct spinlock;
-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
-					 struct spinlock *old_pmd_ptl,
-					 struct vm_area_struct *vma)
-{
-	if (radix_enabled())
-		return false;
-	/*
-	 * Archs like ppc64 use pgtable to store per pmd
-	 * specific information. So when we switch the pmd,
-	 * we should also withdraw and deposit the pgtable
-	 */
-	return true;
-}
-
-
+extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+				  struct spinlock *old_pmd_ptl,
+				  struct vm_area_struct *vma);
+/*
+ * Hash translation mode use the deposited table to store hash pte
+ * slot information.
+ */
 #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
 static inline bool arch_needs_pgtable_deposit(void)
 {

+ 1 - 1
arch/powerpc/include/asm/fadump.h

@@ -200,7 +200,7 @@ struct fad_crash_memory_ranges {
 	unsigned long long	size;
 };
 
-extern int is_fadump_boot_memory_area(u64 addr, ulong size);
+extern int is_fadump_memory_area(u64 addr, ulong size);
 extern int early_init_dt_scan_fw_dump(unsigned long node,
 		const char *uname, int depth, void *data);
 extern int fadump_reserve_mem(void);

+ 1 - 1
arch/powerpc/include/asm/uaccess.h

@@ -63,7 +63,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
 #endif
 
 #define access_ok(type, addr, size)		\
-	(__chk_user_ptr(addr),			\
+	(__chk_user_ptr(addr), (void)(type),		\
 	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
 
 /*
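Note (an illustration, not part of the diff): the added (void)(type) keeps the otherwise-unused first argument evaluated, so callers that still compute an access type do not trip "set but not used" warnings. A self-contained sketch of the idiom, with a made-up macro name:

    #include <stdio.h>

    /* Hypothetical macro that ignores its first argument but still evaluates it. */
    #define my_access_ok(type, addr, size) \
    	((void)(type), (addr) != NULL && (size) > 0)

    int main(void)
    {
    	int verify = 1;		/* would be flagged as set-but-unused otherwise */
    	char buf[16];

    	printf("%d\n", my_access_ok(verify, buf, sizeof(buf)));
    	return 0;
    }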

+ 8 - 2
arch/powerpc/kernel/fadump.c

@@ -118,13 +118,19 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
 
 /*
  * If fadump is registered, check if the memory provided
- * falls within boot memory area.
+ * falls within boot memory area and reserved memory area.
  */
-int is_fadump_boot_memory_area(u64 addr, ulong size)
+int is_fadump_memory_area(u64 addr, ulong size)
 {
+	u64 d_start = fw_dump.reserve_dump_area_start;
+	u64 d_end = d_start + fw_dump.reserve_dump_area_size;
+
 	if (!fw_dump.dump_registered)
 		return 0;
 
+	if (((addr + size) > d_start) && (addr <= d_end))
+		return 1;
+
 	return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
 }
 
 

+ 4 - 0
arch/powerpc/kernel/vmlinux.lds.S

@@ -296,6 +296,10 @@ SECTIONS
 #ifdef CONFIG_PPC32
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 		DATA_DATA
+#ifdef CONFIG_UBSAN
+		*(.data..Lubsan_data*)
+		*(.data..Lubsan_type*)
+#endif
 		*(.data.rel*)
 		*(SDATA_MAIN)
 		*(.sdata2)

+ 4 - 1
arch/powerpc/kvm/powerpc.c

@@ -543,8 +543,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
 	case KVM_CAP_SPAPR_TCE_64:
-		/* fallthrough */
+		r = 1;
+		break;
 	case KVM_CAP_SPAPR_TCE_VFIO:
+		r = !!cpu_has_feature(CPU_FTR_HVMODE);
+		break;
 	case KVM_CAP_PPC_RTAS:
 	case KVM_CAP_PPC_FIXUP_HCALL:
 	case KVM_CAP_PPC_ENABLE_HCALL:
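Note (an illustration, not the kernel code): the cases are split here because KVM_CAP_SPAPR_TCE_VFIO now depends on HV mode while KVM_CAP_SPAPR_TCE must keep reporting 1, so the old shared fallthrough would have made the first answer wrongly conditional. A tiny stand-alone sketch of that hazard:

    #include <stdio.h>

    static int check_cap(int cap, int hv_mode)
    {
    	int r = 0;

    	switch (cap) {
    	case 1:			/* stands in for KVM_CAP_SPAPR_TCE */
    		/* fallthrough */	/* BUG: now inherits the next case's answer */
    	case 2:			/* stands in for KVM_CAP_SPAPR_TCE_VFIO */
    		r = hv_mode;
    		break;
    	}
    	return r;
    }

    int main(void)
    {
    	printf("cap 1 without HV mode: %d (should be 1)\n", check_cap(1, 0));
    	return 0;
    }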

+ 3 - 1
arch/powerpc/mm/fault.c

@@ -221,7 +221,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
 static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
 			     unsigned long address)
 {
-	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
+	/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
+	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
+				      DSISR_PROTFAULT))) {
 		printk_ratelimited(KERN_CRIT "kernel tried to execute"
 				   " exec-protected page (%lx) -"
 				   "exploit attempt? (uid: %d)\n",

+ 22 - 0
arch/powerpc/mm/pgtable-book3s64.c

@@ -477,3 +477,25 @@ void arch_report_meminfo(struct seq_file *m)
 		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
 }
 #endif /* CONFIG_PROC_FS */
+
+/*
+ * For hash translation mode, we use the deposited table to store hash slot
+ * information and they are stored at PTRS_PER_PMD offset from related pmd
+ * location. Hence a pmd move requires deposit and withdraw.
+ *
+ * For radix translation with split pmd ptl, we store the deposited table in the
+ * pmd page. Hence if we have different pmd page we need to withdraw during pmd
+ * move.
+ *
+ * With hash we use deposited table always irrespective of anon or not.
+ * With radix we use deposited table only for anonymous mapping.
+ */
+int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+			   struct spinlock *old_pmd_ptl,
+			   struct vm_area_struct *vma)
+{
+	if (radix_enabled())
+		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
+
+	return true;
+}

+ 6 - 1
arch/powerpc/perf/isa207-common.c

@@ -226,8 +226,13 @@ void isa207_get_mem_weight(u64 *weight)
 	u64 mmcra = mfspr(SPRN_MMCRA);
 	u64 exp = MMCRA_THR_CTR_EXP(mmcra);
 	u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
+	u64 sier = mfspr(SPRN_SIER);
+	u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
 
-	*weight = mantissa << (2 * exp);
+	if (val == 0 || val == 7)
+		*weight = 0;
+	else
+		*weight = mantissa << (2 * exp);
 }
 
 int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)

+ 1 - 1
arch/powerpc/platforms/powernv/pci-ioda-tce.c

@@ -299,7 +299,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	if (alloc_userspace_copy) {
 		offset = 0;
 		uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-				levels, tce_table_size, &offset,
+				tmplevels, tce_table_size, &offset,
 				&total_allocated_uas);
 		if (!uas)
 			goto free_tces_exit;

+ 2 - 0
arch/powerpc/platforms/pseries/dlpar.c

@@ -272,6 +272,8 @@ int dlpar_detach_node(struct device_node *dn)
 	if (rc)
 		return rc;
 
+	of_node_put(dn);
+
 	return 0;
 }
 
 

+ 5 - 2
arch/powerpc/platforms/pseries/hotplug-memory.c

@@ -389,8 +389,11 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
 	phys_addr = lmb->base_addr;
 
 #ifdef CONFIG_FA_DUMP
-	/* Don't hot-remove memory that falls in fadump boot memory area */
-	if (is_fadump_boot_memory_area(phys_addr, block_sz))
+	/*
+	 * Don't hot-remove memory that falls in fadump boot memory area
+	 * and memory that is reserved for capturing old kernel memory.
+	 */
+	if (is_fadump_memory_area(phys_addr, block_sz))
 		return false;
 #endif
 
 

+ 14 - 4
arch/powerpc/xmon/xmon.c

@@ -75,6 +75,9 @@ static int xmon_gate;
 #define xmon_owner 0
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC_PSERIES
+static int set_indicator_token = RTAS_UNKNOWN_SERVICE;
+#endif
 static unsigned long in_xmon __read_mostly = 0;
 static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
 
@@ -358,7 +361,6 @@ static inline void disable_surveillance(void)
 #ifdef CONFIG_PPC_PSERIES
 	/* Since this can't be a module, args should end up below 4GB. */
 	static struct rtas_args args;
-	int token;
 
 	/*
 	 * At this point we have got all the cpus we can into
@@ -367,11 +369,11 @@ static inline void disable_surveillance(void)
 	 * If we did try to take rtas.lock there would be a
 	 * real possibility of deadlock.
 	 */
-	token = rtas_token("set-indicator");
-	if (token == RTAS_UNKNOWN_SERVICE)
+	if (set_indicator_token == RTAS_UNKNOWN_SERVICE)
 		return;
 
-	rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
+	rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL,
+			   SURVEILLANCE_TOKEN, 0, 0);
 
 #endif /* CONFIG_PPC_PSERIES */
 }
@@ -3672,6 +3674,14 @@ static void xmon_init(int enable)
 		__debugger_iabr_match = xmon_iabr_match;
 		__debugger_break_match = xmon_break_match;
 		__debugger_fault_handler = xmon_fault_handler;
+
+#ifdef CONFIG_PPC_PSERIES
+		/*
+		 * Get the token here to avoid trying to get a lock
+		 * during the crash, causing a deadlock.
+		 */
+		set_indicator_token = rtas_token("set-indicator");
+#endif
 	} else {
 		__debugger = NULL;
 		__debugger_ipi = NULL;
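Note (an illustration only): per the comment in the hunk above, the token lookup can take a lock, so it is now resolved once in xmon_init() and only the cached value is read on the crash path. A generic, stand-alone sketch of that pattern:

    #include <stdio.h>

    static int cached_token = -1;			/* stands in for RTAS_UNKNOWN_SERVICE */

    static int lookup_taking_lock(const char *name)
    {
    	return name ? 42 : -1;			/* pretend this acquires a lock */
    }

    static void init_path(void)
    {
    	cached_token = lookup_taking_lock("set-indicator");
    }

    static void crash_path(void)
    {
    	if (cached_token == -1)			/* never call the locking lookup here */
    		return;
    	printf("using cached token %d\n", cached_token);
    }

    int main(void)
    {
    	init_path();
    	crash_path();
    	return 0;
    }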

+ 6 - 0
arch/riscv/include/asm/pgtable-bits.h

@@ -35,6 +35,12 @@
 #define _PAGE_SPECIAL   _PAGE_SOFT
 #define _PAGE_TABLE     _PAGE_PRESENT
 
+/*
+ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
+ * distinguish them from swapped out pages
+ */
+#define _PAGE_PROT_NONE _PAGE_READ
+
 #define _PAGE_PFN_SHIFT 10
 
 /* Set of bits to preserve across pte_modify() */

+ 4 - 4
arch/riscv/include/asm/pgtable.h

@@ -44,7 +44,7 @@
 /* Page protection bits */
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
 
-#define PAGE_NONE		__pgprot(0)
+#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
 #define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
 #define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
 #define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 static inline int pmd_present(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _PAGE_PRESENT);
+	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
 }
 
 static inline int pmd_none(pmd_t pmd)
@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
 
 static inline int pte_present(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_PRESENT);
+	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
 }
 
 static inline int pte_none(pte_t pte)
@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
- *	bit            1:	reserved for future use (zero)
+ *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
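Note (an illustration only): with _PAGE_PROT_NONE aliased to the read bit, a PROT_NONE page is still classified as present even though the hardware-valid bit is clear, which keeps it distinguishable from a swap PTE, whose bits 0 and 1 are both zero per the format comment above. A stand-alone sketch, assuming the standard RISC-V PTE layout (V at bit 0, R at bit 1):

    #include <stdio.h>

    #define _PAGE_PRESENT	(1UL << 0)
    #define _PAGE_READ	(1UL << 1)
    #define _PAGE_PROT_NONE	_PAGE_READ

    static int pte_present_like(unsigned long pte)
    {
    	return (pte & (_PAGE_PRESENT | _PAGE_PROT_NONE)) != 0;
    }

    int main(void)
    {
    	printf("PROT_NONE pte: present=%d\n", pte_present_like(_PAGE_PROT_NONE));
    	printf("swap-type pte: present=%d\n", pte_present_like(0));
    	return 0;
    }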

+ 1 - 1
arch/riscv/kernel/ptrace.c

@@ -120,6 +120,6 @@ void do_syscall_trace_exit(struct pt_regs *regs)
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
-		trace_sys_exit(regs, regs->regs[0]);
+		trace_sys_exit(regs, regs_return_value(regs));
 #endif
 }

+ 2 - 3
arch/s390/include/asm/mmu_context.h

@@ -89,8 +89,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
-	if (prev == next)
-		return;
 	S390_lowcore.user_asce = next->context.asce;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
 	/* Clear previous user-ASCE from CR1 and CR7 */
@@ -102,7 +100,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__ctl_load(S390_lowcore.vdso_asce, 7, 7);
 		clear_cpu_flag(CIF_ASCE_SECONDARY);
 	}
-	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	if (prev != next)
+		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch

+ 2 - 2
arch/s390/include/uapi/asm/zcrypt.h

@@ -147,8 +147,8 @@ struct ica_xcRB {
 * @cprb_len:		CPRB header length [0x0020]
 * @cprb_ver_id:	CPRB version id.   [0x04]
 * @pad_000:		Alignment pad bytes
- * @flags:		Admin cmd [0x80] or functional cmd [0x00]
- * @func_id:		Function id / subtype [0x5434]
+ * @flags:		Admin bit [0x80], Special bit [0x20]
+ * @func_id:		Function id / subtype [0x5434] "T4"
 * @source_id:		Source id [originator id]
 * @target_id:		Target id [usage/ctrl domain id]
 * @ret_code:		Return code

Some files were not shown because too many files changed in this diff