
Merge 4.5-rc4 into driver-core-next

We want the fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman, 9 years ago
parent
commit
c21b04f989
100 files changed, 852 insertions(+), 547 deletions(-)
  1. Documentation/cgroup-v2.txt (+1 -1)
  2. Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt (+1 -0)
  3. Documentation/devicetree/bindings/pci/rcar-pci.txt (+1 -0)
  4. Documentation/devicetree/bindings/sound/fsl-asoc-card.txt (+2 -0)
  5. Documentation/devicetree/bindings/thermal/rcar-thermal.txt (+35 -2)
  6. Documentation/kernel-parameters.txt (+11 -0)
  7. MAINTAINERS (+5 -12)
  8. Makefile (+1 -1)
  9. arch/arc/Kconfig (+20 -1)
  10. arch/arc/configs/vdk_hs38_smp_defconfig (+1 -1)
  11. arch/arc/include/asm/arcregs.h (+1 -2)
  12. arch/arc/include/asm/irqflags-arcv2.h (+5 -2)
  13. arch/arc/include/asm/mcip.h (+2 -2)
  14. arch/arc/include/asm/pgtable.h (+26 -19)
  15. arch/arc/kernel/entry-arcv2.S (+18 -1)
  16. arch/arc/kernel/intc-arcv2.c (+24 -17)
  17. arch/arc/kernel/mcip.c (+5 -5)
  18. arch/arc/kernel/setup.c (+13 -7)
  19. arch/arc/kernel/time.c (+4 -4)
  20. arch/arm/common/icst.c (+7 -2)
  21. arch/arm/configs/omap2plus_defconfig (+16 -17)
  22. arch/arm64/include/asm/arch_gicv3.h (+1 -0)
  23. arch/arm64/include/asm/kvm_arm.h (+1 -0)
  24. arch/arm64/include/asm/kvm_emulate.h (+6 -2)
  25. arch/arm64/kvm/hyp/switch.c (+6 -2)
  26. arch/arm64/kvm/inject_fault.c (+35 -3)
  27. arch/arm64/kvm/sys_regs.c (+4 -5)
  28. arch/mips/Kconfig (+1 -1)
  29. arch/mips/boot/dts/brcm/bcm6328.dtsi (+1 -0)
  30. arch/mips/boot/dts/brcm/bcm7125.dtsi (+1 -0)
  31. arch/mips/boot/dts/brcm/bcm7346.dtsi (+1 -0)
  32. arch/mips/boot/dts/brcm/bcm7358.dtsi (+1 -0)
  33. arch/mips/boot/dts/brcm/bcm7360.dtsi (+1 -0)
  34. arch/mips/boot/dts/brcm/bcm7362.dtsi (+1 -0)
  35. arch/mips/boot/dts/brcm/bcm7420.dtsi (+1 -0)
  36. arch/mips/boot/dts/brcm/bcm7425.dtsi (+1 -0)
  37. arch/mips/boot/dts/brcm/bcm7435.dtsi (+1 -0)
  38. arch/mips/include/asm/elf.h (+7 -2)
  39. arch/mips/include/asm/fpu.h (+4 -0)
  40. arch/mips/include/asm/octeon/octeon-feature.h (+2 -1)
  41. arch/mips/include/asm/processor.h (+1 -1)
  42. arch/mips/include/asm/stackframe.h (+2 -2)
  43. arch/mips/include/asm/syscall.h (+1 -3)
  44. arch/mips/include/uapi/asm/unistd.h (+9 -6)
  45. arch/mips/kernel/binfmt_elfn32.c (+1 -1)
  46. arch/mips/kernel/binfmt_elfo32.c (+1 -1)
  47. arch/mips/kernel/process.c (+2 -4)
  48. arch/mips/kernel/scall32-o32.S (+1 -0)
  49. arch/mips/kernel/scall64-64.S (+1 -0)
  50. arch/mips/kernel/scall64-n32.S (+1 -0)
  51. arch/mips/kernel/scall64-o32.S (+1 -0)
  52. arch/mips/kernel/setup.c (+1 -0)
  53. arch/mips/kernel/traps.c (+6 -19)
  54. arch/mips/mm/sc-mips.c (+0 -10)
  55. arch/mips/mti-malta/malta-init.c (+0 -8)
  56. arch/mips/pci/pci-mt7620.c (+4 -4)
  57. arch/x86/Kconfig (+1 -0)
  58. arch/x86/include/asm/processor.h (+1 -1)
  59. arch/x86/mm/numa.c (+1 -1)
  60. block/blk-core.c (+4 -2)
  61. crypto/algif_skcipher.c (+42 -38)
  62. crypto/crypto_user.c (+5 -1)
  63. drivers/ata/ahci.c (+20 -0)
  64. drivers/ata/ahci.h (+1 -0)
  65. drivers/ata/ahci_brcmstb.c (+1 -0)
  66. drivers/ata/libahci.c (+24 -3)
  67. drivers/ata/libata-core.c (+1 -0)
  68. drivers/ata/libata-sff.c (+13 -22)
  69. drivers/base/component.c (+28 -21)
  70. drivers/base/regmap/regmap-mmio.c (+8 -8)
  71. drivers/crypto/atmel-sha.c (+12 -11)
  72. drivers/crypto/marvell/cesa.c (+1 -1)
  73. drivers/gpio/gpio-altera.c (+3 -2)
  74. drivers/gpio/gpio-davinci.c (+4 -3)
  75. drivers/gpu/drm/amd/amdgpu/amdgpu.h (+4 -43)
  76. drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c (+6 -0)
  77. drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (+91 -69)
  78. drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c (+8 -0)
  79. drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c (+2 -1)
  80. drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c (+4 -1)
  81. drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c (+19 -0)
  82. drivers/gpu/drm/amd/amdgpu/ci_dpm.c (+5 -6)
  83. drivers/gpu/drm/amd/amdgpu/cik.c (+80 -77)
  84. drivers/gpu/drm/amd/amdgpu/cik_sdma.c (+2 -2)
  85. drivers/gpu/drm/amd/amdgpu/cz_dpm.c (+3 -3)
  86. drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c (+35 -35)
  87. drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c (+5 -5)
  88. drivers/gpu/drm/amd/amdgpu/kv_dpm.c (+4 -4)
  89. drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c (+8 -2)
  90. drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c (+8 -0)
  91. drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c (+5 -2)
  92. drivers/gpu/drm/amd/amdgpu/vce_v2_0.c (+4 -1)
  93. drivers/gpu/drm/amd/amdgpu/vce_v3_0.c (+5 -2)
  94. drivers/gpu/drm/amd/amdgpu/vi.c (+1 -2)
  95. drivers/gpu/drm/amd/include/amd_shared.h (+32 -0)
  96. drivers/gpu/drm/amd/include/cgs_common.h (+2 -0)
  97. drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c (+18 -0)
  98. drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c (+17 -2)
  99. drivers/gpu/drm/radeon/radeon_sa.c (+5 -0)
  100. drivers/infiniband/core/sysfs.c (+2 -5)

+ 1 - 1
Documentation/cgroup-v2.txt

@@ -7,7 +7,7 @@ This is the authoritative documentation on the design, interface and
 conventions of cgroup v2.  It describes all userland-visible aspects
 of cgroup including core and specific controller behaviors.  All
 future changes must be reflected in this document.  Documentation for
-v1 is available under Documentation/cgroup-legacy/.
+v1 is available under Documentation/cgroup-v1/.
 
 CONTENTS
 

+ 1 - 0
Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt

@@ -8,6 +8,7 @@ OHCI and EHCI controllers.
 Required properties:
 - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
 	      "renesas,pci-r8a7791" for the R8A7791 SoC;
+	      "renesas,pci-r8a7793" for the R8A7793 SoC;
 	      "renesas,pci-r8a7794" for the R8A7794 SoC;
 	      "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device
 

+ 1 - 0
Documentation/devicetree/bindings/pci/rcar-pci.txt

@@ -4,6 +4,7 @@ Required properties:
 compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
 	    "renesas,pcie-r8a7790" for the R8A7790 SoC;
 	    "renesas,pcie-r8a7791" for the R8A7791 SoC;
+	    "renesas,pcie-r8a7793" for the R8A7793 SoC;
 	    "renesas,pcie-r8a7795" for the R8A7795 SoC;
 	    "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.
 

+ 2 - 0
Documentation/devicetree/bindings/sound/fsl-asoc-card.txt

@@ -30,6 +30,8 @@ The compatible list for this generic sound card currently:
  "fsl,imx-audio-sgtl5000"
  "fsl,imx-audio-sgtl5000"
  (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)
  (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)
 
 
+ "fsl,imx-audio-wm8960"
+
 Required properties:
 Required properties:
 
 
   - compatible		: Contains one of entries in the compatible list.
   - compatible		: Contains one of entries in the compatible list.

+ 35 - 2
Documentation/devicetree/bindings/thermal/rcar-thermal.txt

@@ -1,8 +1,9 @@
 * Renesas R-Car Thermal
 
 Required properties:
-- compatible		: "renesas,thermal-<soctype>", "renesas,rcar-thermal"
-			  as fallback.
+- compatible		: "renesas,thermal-<soctype>",
+			   "renesas,rcar-gen2-thermal" (with thermal-zone) or
+			   "renesas,rcar-thermal" (without thermal-zone) as fallback.
 			  Examples with soctypes are:
 			    - "renesas,thermal-r8a73a4" (R-Mobile APE6)
 			    - "renesas,thermal-r8a7779" (R-Car H1)
@@ -36,3 +37,35 @@ thermal@e61f0000 {
 		0xe61f0300 0x38>;
 	interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
 };
+
+Example (with thermal-zone):
+
+thermal-zones {
+	cpu_thermal: cpu-thermal {
+		polling-delay-passive	= <1000>;
+		polling-delay		= <5000>;
+
+		thermal-sensors = <&thermal>;
+
+		trips {
+			cpu-crit {
+				temperature	= <115000>;
+				hysteresis	= <0>;
+				type		= "critical";
+			};
+		};
+		cooling-maps {
+		};
+	};
+};
+
+thermal: thermal@e61f0000 {
+	compatible =	"renesas,thermal-r8a7790",
+			"renesas,rcar-gen2-thermal",
+			"renesas,rcar-thermal";
+	reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
+	interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
+	power-domains = <&cpg_clocks>;
+	#thermal-sensor-cells = <0>;
+};

+ 11 - 0
Documentation/kernel-parameters.txt

@@ -4235,6 +4235,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			The default value of this parameter is determined by
 			the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
 
+	workqueue.debug_force_rr_cpu
+			Workqueue used to implicitly guarantee that work
+			items queued without explicit CPU specified are put
+			on the local CPU.  This guarantee is no longer true
+			and while local CPU is still preferred work items
+			may be put on foreign CPUs.  This debug option
+			forces round-robin CPU selection to flush out
+			usages which depend on the now broken guarantee.
+			When enabled, memory and cache locality will be
+			impacted.
+
 	x2apic_phys	[X86-64,APIC] Use x2apic physical mode instead of
 			default x2apic cluster mode on platforms
 			supporting x2apic.
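Usage note (standard module-parameter syntax; the sysfs path below is the conventional location for a writable bool parameter and is an assumption, not something this hunk spells out): boot with workqueue.debug_force_rr_cpu=1 to force the round-robin placement, or toggle it at runtime through /sys/module/workqueue/parameters/debug_force_rr_cpu.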

+ 5 - 12
MAINTAINERS

@@ -2362,14 +2362,6 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
 S:	Maintained
 N:	bcm2835
 
-BROADCOM BCM33XX MIPS ARCHITECTURE
-M:	Kevin Cernekee <cernekee@gmail.com>
-L:	linux-mips@linux-mips.org
-S:	Maintained
-F:	arch/mips/bcm3384/*
-F:	arch/mips/include/asm/mach-bcm3384/*
-F:	arch/mips/kernel/*bmips*
-
 BROADCOM BCM47XX MIPS ARCHITECTURE
 M:	Hauke Mehrtens <hauke@hauke-m.de>
 M:	Rafał Miłecki <zajec5@gmail.com>
@@ -9787,10 +9779,11 @@ S:	Supported
 F:	drivers/scsi/be2iscsi/
 
 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
-M:	Sathya Perla <sathya.perla@avagotech.com>
-M:	Ajit Khaparde <ajit.khaparde@avagotech.com>
-M:	Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
-M:	Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+M:	Sathya Perla <sathya.perla@broadcom.com>
+M:	Ajit Khaparde <ajit.khaparde@broadcom.com>
+M:	Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
+M:	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+M:	Somnath Kotur <somnath.kotur@broadcom.com>
 L:	netdev@vger.kernel.org
 W:	http://www.emulex.com
 S:	Supported

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*

+ 20 - 1
arch/arc/Kconfig

@@ -338,6 +338,19 @@ config ARC_PAGE_SIZE_4K
 
 
 endchoice
 
+choice
+	prompt "MMU Super Page Size"
+	depends on ISA_ARCV2 && TRANSPARENT_HUGEPAGE
+	default ARC_HUGEPAGE_2M
+
+config ARC_HUGEPAGE_2M
+	bool "2MB"
+
+config ARC_HUGEPAGE_16M
+	bool "16MB"
+
+endchoice
+
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
@@ -410,7 +423,7 @@ config ARC_HAS_RTC
 	default n
 	depends on !SMP
 
-config ARC_HAS_GRTC
+config ARC_HAS_GFRC
 	bool "SMP synchronized 64-bit cycle counter"
 	default y
 	depends on SMP
@@ -566,6 +579,12 @@ endmenu
 endmenu	 # "ARC Architecture Configuration"
 
 source "mm/Kconfig"
+
+config FORCE_MAX_ZONEORDER
+	int "Maximum zone order"
+	default "12" if ARC_HUGEPAGE_16M
+	default "11"
+
 source "net/Kconfig"
 source "drivers/Kconfig"
 source "fs/Kconfig"
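Worked check of the FORCE_MAX_ZONEORDER defaults (editor's arithmetic, assuming ARC's usual 8 KB PAGE_SIZE): a zone order of N lets the buddy allocator hand out up to 2^(N-1) contiguous pages, so order 12 gives 2^11 x 8 KB = 16 MB, exactly one ARC_HUGEPAGE_16M super page, while the default order 11 tops out at 8 MB, which comfortably covers the 2 MB case.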

+ 1 - 1
arch/arc/configs/vdk_hs38_smp_defconfig

@@ -16,7 +16,7 @@ CONFIG_ARC_PLAT_AXS10X=y
 CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
-# CONFIG_ARC_HAS_GRTC is not set
+# CONFIG_ARC_HAS_GFRC is not set
 CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
 CONFIG_PREEMPT=y

+ 1 - 2
arch/arc/include/asm/arcregs.h

@@ -349,14 +349,13 @@ struct cpuinfo_arc {
 	struct cpuinfo_arc_bpu bpu;
 	struct bcr_identity core;
 	struct bcr_isa isa;
-	struct bcr_timer timers;
 	unsigned int vec_base;
 	struct cpuinfo_arc_ccm iccm, dccm;
 	struct {
 		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
 			     fpu_sp:1, fpu_dp:1, pad2:6,
 			     debug:1, ap:1, smart:1, rtt:1, pad3:4,
-			     pad4:8;
+			     timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
 	} extn;
 	struct bcr_mpy extn_mpy;
 	struct bcr_extn_xymem extn_xymem;

+ 5 - 2
arch/arc/include/asm/irqflags-arcv2.h

@@ -30,8 +30,11 @@
 /* Was Intr taken in User Mode */
 #define AUX_IRQ_ACT_BIT_U	31
 
-/* 0 is highest level, but taken by FIRQs, if present in design */
-#define ARCV2_IRQ_DEF_PRIO		0
+/*
+ * User space should be interruptable even by lowest prio interrupt
+ * Safe even if actual interrupt priorities is fewer or even one
+ */
+#define ARCV2_IRQ_DEF_PRIO	15
 
 /* seed value for status register */
 #define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \

+ 2 - 2
arch/arc/include/asm/mcip.h

@@ -39,8 +39,8 @@ struct mcip_cmd {
 #define CMD_DEBUG_SET_MASK		0x34
 #define CMD_DEBUG_SET_SELECT		0x36
 
-#define CMD_GRTC_READ_LO		0x42
-#define CMD_GRTC_READ_HI		0x43
+#define CMD_GFRC_READ_LO		0x42
+#define CMD_GFRC_READ_HI		0x43
 
 #define CMD_IDU_ENABLE			0x71
 #define CMD_IDU_DISABLE			0x72

+ 26 - 19
arch/arc/include/asm/pgtable.h

@@ -179,37 +179,44 @@
 #define __S111  PAGE_U_X_W_R
 
 /****************************************************************
- * Page Table Lookup split
+ * 2 tier (PGD:PTE) software page walker
  *
- * We implement 2 tier paging and since this is all software, we are free
- * to customize the span of a PGD / PTE entry to suit us
- *
- *			32 bit virtual address
+ * [31]		    32 bit virtual address              [0]
  * -------------------------------------------------------
- * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
+ * |               | <------------ PGDIR_SHIFT ----------> |
+ * |		   |					 |
+ * | BITS_FOR_PGD  |  BITS_FOR_PTE  | <-- PAGE_SHIFT --> |
  * -------------------------------------------------------
  *       |                  |                |
  *       |                  |                --> off in page frame
- *       |		    |
  *       |                  ---> index into Page Table
- *       |
  *       ----> index into Page Directory
+ *
+ * In a single page size configuration, only PAGE_SHIFT is fixed
+ * So both PGD and PTE sizing can be tweaked
+ *  e.g. 8K page (PAGE_SHIFT 13) can have
+ *  - PGDIR_SHIFT 21  -> 11:8:13 address split
+ *  - PGDIR_SHIFT 24  -> 8:11:13 address split
+ *
+ * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
+ * so the sizing flexibility is gone.
  */
 
-#define BITS_IN_PAGE	PAGE_SHIFT
-
-/* Optimal Sizing of Pg Tbl - based on MMU page size */
-#if defined(CONFIG_ARC_PAGE_SIZE_8K)
-#define BITS_FOR_PTE	8		/* 11:8:13 */
-#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
-#define BITS_FOR_PTE	8		/* 10:8:14 */
-#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
-#define BITS_FOR_PTE	9		/* 11:9:12 */
+#if defined(CONFIG_ARC_HUGEPAGE_16M)
+#define PGDIR_SHIFT	24
+#elif defined(CONFIG_ARC_HUGEPAGE_2M)
+#define PGDIR_SHIFT	21
+#else
+/*
+ * Only Normal page support so "hackable" (see comment above)
+ * Default value provides 11:8:13 (8K), 11:9:12 (4K)
+ */
+#define PGDIR_SHIFT	21
 #endif
 
-#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)
+#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
+#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)
 
-#define PGDIR_SHIFT	(32 - BITS_FOR_PGD)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PDG sz */
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
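Worked example of the new PGDIR_SHIFT-driven split (editor's arithmetic, derived from the code above): with PAGE_SHIFT = 13 (8 KB pages) and the default PGDIR_SHIFT = 21, BITS_FOR_PTE = 21 - 13 = 8 and BITS_FOR_PGD = 32 - 21 = 11, reproducing the old 11:8:13 layout; selecting ARC_HUGEPAGE_16M fixes PGDIR_SHIFT at 24, giving an 8:11:13 split in which each PGD entry spans 1 << 24 = 16 MB of virtual address space.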
 
 

+ 18 - 1
arch/arc/kernel/entry-arcv2.S

@@ -211,7 +211,11 @@ debug_marker_syscall:
 ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
 ; entry was via Exception in DS which got preempted in kernel).
 ;
-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
+;
+; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
+; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
+
 .Lintr_ret_to_delay_slot:
debug_marker_ds:
 
@@ -222,18 +226,23 @@ debug_marker_ds:
 	ld	r2, [sp, PT_ret]
 	ld	r3, [sp, PT_status32]
 
+	; STAT32 for Int return created from scratch
+	; (No delay dlot, disable Further intr in trampoline)
+
 	bic  	r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
 	st	r0, [sp, PT_status32]
 
 	mov	r1, .Lintr_ret_to_delay_slot_2
 	st	r1, [sp, PT_ret]
 
+	; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
 	st	r2, [sp, 0]
 	st	r3, [sp, 4]
 
 	b	.Lisr_ret_fast_path
 
 .Lintr_ret_to_delay_slot_2:
+	; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
 	sub	sp, sp, SZ_PT_REGS
 	st	r9, [sp, -4]
 
@@ -243,11 +252,19 @@ debug_marker_ds:
 	ld	r9, [sp, 4]
 	sr	r9, [erstatus]
 
+	; restore AUX_USER_SP if returning to U mode
+	bbit0	r9, STATUS_U_BIT, 1f
+	ld	r9, [sp, PT_sp]
+	sr	r9, [AUX_USER_SP]
+
+1:
 	ld	r9, [sp, 8]
 	sr	r9, [erbta]
 
 	ld	r9, [sp, -4]
 	add	sp, sp, SZ_PT_REGS
+
+	; return from pure kernel mode to delay slot
 	rtie
 
 END(ret_from_exception)

+ 24 - 17
arch/arc/kernel/intc-arcv2.c

@@ -14,6 +14,8 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
+static int irq_prio;
+
 /*
  * Early Hardware specific Interrupt setup
  * -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,6 +26,14 @@ void arc_init_IRQ(void)
 {
 	unsigned int tmp;
 
+	struct irq_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
+#else
+		unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
+#endif
+	} irq_bcr;
+
 	struct aux_irq_ctrl {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 		unsigned int res3:18, save_idx_regs:1, res2:1,
@@ -46,28 +56,25 @@ void arc_init_IRQ(void)
 
 	WRITE_AUX(AUX_IRQ_CTRL, ictrl);
 
-	/* setup status32, don't enable intr yet as kernel doesn't want */
-	tmp = read_aux_reg(0xa);
-	tmp |= ISA_INIT_STATUS_BITS;
-	tmp &= ~STATUS_IE_MASK;
-	asm volatile("flag %0	\n"::"r"(tmp));
-
 	/*
 	 * ARCv2 core intc provides multiple interrupt priorities (upto 16).
 	 * Typical builds though have only two levels (0-high, 1-low)
 	 * Linux by default uses lower prio 1 for most irqs, reserving 0 for
 	 * NMI style interrupts in future (say perf)
-	 *
-	 * Read the intc BCR to confirm that Linux default priority is avail
-	 * in h/w
-	 *
-	 * Note:
-	 *  IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level
-	 *  is 0 based.
 	 */
-	tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF;
-	if (ARCV2_IRQ_DEF_PRIO > tmp)
-		panic("Linux default irq prio incorrect\n");
+
+	READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
+
+	irq_prio = irq_bcr.prio;	/* Encoded as N-1 for N levels */
+	pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
+		irq_prio + 1, irq_prio,
+		irq_bcr.firq ? " FIRQ (not used)":"");
+
+	/* setup status32, don't enable intr yet as kernel doesn't want */
+	tmp = read_aux_reg(0xa);
+	tmp |= STATUS_AD_MASK | (irq_prio << 1);
+	tmp &= ~STATUS_IE_MASK;
+	asm volatile("flag %0	\n"::"r"(tmp));
 }
 
 static void arcv2_irq_mask(struct irq_data *data)
@@ -86,7 +93,7 @@ void arcv2_irq_enable(struct irq_data *data)
 {
 	/* set default priority */
 	write_aux_reg(AUX_IRQ_SELECT, data->irq);
-	write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+	write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
 
 	/*
 	 * hw auto enables (linux unmask) all by default

+ 5 - 5
arch/arc/kernel/mcip.c

@@ -96,13 +96,13 @@ static void mcip_probe_n_setup(void)
 #ifdef CONFIG_CPU_BIG_ENDIAN
 		unsigned int pad3:8,
 			     idu:1, llm:1, num_cores:6,
-			     iocoh:1,  grtc:1, dbg:1, pad2:1,
+			     iocoh:1,  gfrc:1, dbg:1, pad2:1,
 			     msg:1, sem:1, ipi:1, pad:1,
 			     ver:8;
 #else
 		unsigned int ver:8,
 			     pad:1, ipi:1, sem:1, msg:1,
-			     pad2:1, dbg:1, grtc:1, iocoh:1,
+			     pad2:1, dbg:1, gfrc:1, iocoh:1,
 			     num_cores:6, llm:1, idu:1,
 			     pad3:8;
 #endif
@@ -116,7 +116,7 @@ static void mcip_probe_n_setup(void)
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
-		IS_AVAIL1(mp.grtc, "GRTC"));
+		IS_AVAIL1(mp.gfrc, "GFRC"));
 
 	idu_detected = mp.idu;
 
@@ -125,8 +125,8 @@ static void mcip_probe_n_setup(void)
 		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
 	}
 
-	if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
-		panic("kernel trying to use non-existent GRTC\n");
+	if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
+		panic("kernel trying to use non-existent GFRC\n");
 }
 
 struct plat_smp_ops plat_smp_ops = {

+ 13 - 7
arch/arc/kernel/setup.c

@@ -45,6 +45,7 @@ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 static void read_arc_build_cfg_regs(void)
 {
 	struct bcr_perip uncached_space;
+	struct bcr_timer timer;
 	struct bcr_generic bcr;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	unsigned long perip_space;
@@ -53,7 +54,11 @@ static void read_arc_build_cfg_regs(void)
 	READ_BCR(AUX_IDENTITY, cpu->core);
 	READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
 
-	READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
+	READ_BCR(ARC_REG_TIMERS_BCR, timer);
+	cpu->extn.timer0 = timer.t0;
+	cpu->extn.timer1 = timer.t1;
+	cpu->extn.rtc = timer.rtc;
+
 	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
 	READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
@@ -208,9 +213,9 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 		       (unsigned int)(arc_get_core_freq() / 10000) % 100);
 
 	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
-		       IS_AVAIL1(cpu->timers.t0, "Timer0 "),
-		       IS_AVAIL1(cpu->timers.t1, "Timer1 "),
-		       IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ",
+		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
+		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
+		       IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
 				 CONFIG_ARC_HAS_RTC));
 
 	n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
@@ -293,13 +298,13 @@ static void arc_chk_core_config(void)
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	int fpu_enabled;
 
-	if (!cpu->timers.t0)
+	if (!cpu->extn.timer0)
 		panic("Timer0 is not present!\n");
 
-	if (!cpu->timers.t1)
+	if (!cpu->extn.timer1)
 		panic("Timer1 is not present!\n");
 
-	if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc)
+	if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
 		panic("RTC is not present\n");
 
 #ifdef CONFIG_ARC_HAS_DCCM
@@ -334,6 +339,7 @@ static void arc_chk_core_config(void)
 		panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
 
 	if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+	    IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
 	    !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
 		panic("llock/scond livelock workaround missing\n");
 }

+ 4 - 4
arch/arc/kernel/time.c

@@ -62,7 +62,7 @@
 
 
 /********** Clock Source Device *********/
 
-#ifdef CONFIG_ARC_HAS_GRTC
+#ifdef CONFIG_ARC_HAS_GFRC
 
 static int arc_counter_setup(void)
 {
@@ -83,10 +83,10 @@ static cycle_t arc_counter_read(struct clocksource *cs)
 
 	local_irq_save(flags);
 
-	__mcip_cmd(CMD_GRTC_READ_LO, 0);
+	__mcip_cmd(CMD_GFRC_READ_LO, 0);
 	stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
 
-	__mcip_cmd(CMD_GRTC_READ_HI, 0);
+	__mcip_cmd(CMD_GFRC_READ_HI, 0);
 	stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
 
 	local_irq_restore(flags);
@@ -95,7 +95,7 @@ static cycle_t arc_counter_read(struct clocksource *cs)
 }
 
 static struct clocksource arc_counter = {
-	.name   = "ARConnect GRTC",
+	.name   = "ARConnect GFRC",
 	.rating = 400,
 	.read   = arc_counter_read,
 	.mask   = CLOCKSOURCE_MASK(64),

+ 7 - 2
arch/arm/common/icst.c

@@ -16,7 +16,7 @@
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
-
+#include <asm/div64.h>
 #include <asm/hardware/icst.h>
 
 /*
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
 
 unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
 {
-	return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+	u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+	u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
+	do_div(dividend, divisor);
+	return (unsigned long)dividend;
 }
 
 EXPORT_SYMBOL(icst_hz);
@@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
 
 		if (f > p->vco_min && f <= p->vco_max)
 			break;
+		i++;
 	} while (i < 8);
 
 	if (i >= 8)
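The icst_hz() fix is the standard kernel idiom for a rate computation that can overflow 32 bits: widen the dividend to u64 before the multiply, then divide with do_div(), which divides a u64 by a u32 in place (avoiding a 64-by-64 libgcc division on 32-bit ARM). A minimal sketch of the same pattern, with hypothetical ref/mult/div parameters rather than the driver's own:

    #include <asm/div64.h>

    static unsigned long scale_rate(unsigned long ref, unsigned int mult,
                                    unsigned int div)
    {
            u64 tmp = (u64)ref * mult;      /* widen before the multiply */

            do_div(tmp, div);               /* u64 /= u32; quotient left in tmp */
            return (unsigned long)tmp;
    }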

+ 16 - 17
arch/arm/configs/omap2plus_defconfig

@@ -292,24 +292,23 @@ CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
-CONFIG_OMAP2_DSS=m
-CONFIG_OMAP5_DSS_HDMI=y
-CONFIG_OMAP2_DSS_SDI=y
-CONFIG_OMAP2_DSS_DSI=y
+CONFIG_FB_OMAP5_DSS_HDMI=y
+CONFIG_FB_OMAP2_DSS_SDI=y
+CONFIG_FB_OMAP2_DSS_DSI=y
 CONFIG_FB_OMAP2=m
-CONFIG_DISPLAY_ENCODER_TFP410=m
-CONFIG_DISPLAY_ENCODER_TPD12S015=m
-CONFIG_DISPLAY_CONNECTOR_DVI=m
-CONFIG_DISPLAY_CONNECTOR_HDMI=m
-CONFIG_DISPLAY_CONNECTOR_ANALOG_TV=m
-CONFIG_DISPLAY_PANEL_DPI=m
-CONFIG_DISPLAY_PANEL_DSI_CM=m
-CONFIG_DISPLAY_PANEL_SONY_ACX565AKM=m
-CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02=m
-CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1=m
-CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1=m
-CONFIG_DISPLAY_PANEL_NEC_NL8048HL11=m
+CONFIG_FB_OMAP2_ENCODER_TFP410=m
+CONFIG_FB_OMAP2_ENCODER_TPD12S015=m
+CONFIG_FB_OMAP2_CONNECTOR_DVI=m
+CONFIG_FB_OMAP2_CONNECTOR_HDMI=m
+CONFIG_FB_OMAP2_CONNECTOR_ANALOG_TV=m
+CONFIG_FB_OMAP2_PANEL_DPI=m
+CONFIG_FB_OMAP2_PANEL_DSI_CM=m
+CONFIG_FB_OMAP2_PANEL_SONY_ACX565AKM=m
+CONFIG_FB_OMAP2_PANEL_LGPHILIPS_LB035Q02=m
+CONFIG_FB_OMAP2_PANEL_SHARP_LS037V7DW01=m
+CONFIG_FB_OMAP2_PANEL_TPO_TD028TTEC1=m
+CONFIG_FB_OMAP2_PANEL_TPO_TD043MTEA1=m
+CONFIG_FB_OMAP2_PANEL_NEC_NL8048HL11=m
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=y

+ 1 - 0
arch/arm64/include/asm/arch_gicv3.h

@@ -103,6 +103,7 @@ static inline u64 gic_read_iar_common(void)
 	u64 irqstat;
 
 	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+	dsb(sy);
 	return irqstat;
 }
 

+ 1 - 0
arch/arm64/include/asm/kvm_arm.h

@@ -182,6 +182,7 @@
 #define CPTR_EL2_TCPAC	(1 << 31)
 #define CPTR_EL2_TTA	(1 << 20)
 #define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
+#define CPTR_EL2_DEFAULT	0x000033ff
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TDRA		(1 << 11)

+ 6 - 2
arch/arm64/include/asm/kvm_emulate.h

@@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
 
 
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+	u32 mode;
 
-	if (vcpu_mode_is_32bit(vcpu))
+	if (vcpu_mode_is_32bit(vcpu)) {
+		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
 		return mode > COMPAT_PSR_MODE_USR;
+	}
+
+	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
 
 	return mode != PSR_MODE_EL0t;
 }

+ 6 - 2
arch/arm64/kvm/hyp/switch.c

@@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(val, hcr_el2);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
-	write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+
+	val = CPTR_EL2_DEFAULT;
+	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+	write_sysreg(val, cptr_el2);
+
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
@@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(HCR_RW, hcr_el2);
 	write_sysreg(0, hstr_el2);
 	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
-	write_sysreg(0, cptr_el2);
+	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)

+ 35 - 3
arch/arm64/kvm/inject_fault.c

@@ -27,7 +27,11 @@
 
 
 #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
 				 PSR_I_BIT | PSR_D_BIT)
-#define EL1_EXCEPT_SYNC_OFFSET	0x200
+
+#define CURRENT_EL_SP_EL0_VECTOR	0x0
+#define CURRENT_EL_SP_ELx_VECTOR	0x200
+#define LOWER_EL_AArch64_VECTOR		0x400
+#define LOWER_EL_AArch32_VECTOR		0x600
 
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
@@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		*fsr = 0x14;
 }
 
+enum exception_type {
+	except_type_sync	= 0,
+	except_type_irq		= 0x80,
+	except_type_fiq		= 0x100,
+	except_type_serror	= 0x180,
+};
+
+static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
+{
+	u64 exc_offset;
+
+	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
+	case PSR_MODE_EL1t:
+		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+		break;
+	case PSR_MODE_EL1h:
+		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+		break;
+	case PSR_MODE_EL0t:
+		exc_offset = LOWER_EL_AArch64_VECTOR;
+		break;
+	default:
+		exc_offset = LOWER_EL_AArch32_VECTOR;
+	}
+
+	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
 
@@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	/*
 	 * Build an unknown exception, depending on the instruction
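For context on get_except_vector() (a reading of the code above against the architectural AArch64 vector-table layout): each of the four source groups starts at a 0x200-aligned base (0x0 current-EL with SP_EL0, 0x200 current-EL with SP_ELx, 0x400 lower-EL AArch64, 0x600 lower-EL AArch32), and the entry type adds a 0x80-stride offset within the group. Worked example: injecting a synchronous exception into a guest running at EL1h resolves to VBAR_EL1 + 0x200 + 0x0, while the same injection into an AArch32 guest at EL0 resolves to VBAR_EL1 + 0x600, which the old single EL1_EXCEPT_SYNC_OFFSET constant got wrong for every mode except EL1h.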

+ 4 - 5
arch/arm64/kvm/sys_regs.c

@@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			/* Handled */
+			return 0;
 		}
-
-		/* Handled */
-		return 0;
 	}
 
 	/* Not handled */
@@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 }
 
 /**
- * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
@@ -1095,7 +1094,7 @@ out:
 }
 
 /**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */

+ 1 - 1
arch/mips/Kconfig

@@ -2085,7 +2085,7 @@ config PAGE_SIZE_32KB
 
 
 config PAGE_SIZE_64KB
 	bool "64kB"
-	depends on !CPU_R3000 && !CPU_TX39XX
+	depends on !CPU_R3000 && !CPU_TX39XX && !CPU_R6000
 	help
 	  Using 64kB page size will result in higher performance kernel at
 	  the price of higher memory consumption.  This option is available on

+ 1 - 0
arch/mips/boot/dts/brcm/bcm6328.dtsi

@@ -74,6 +74,7 @@
 		timer: timer@10000040 {
 			compatible = "syscon";
 			reg = <0x10000040 0x2c>;
+			little-endian;
 		};
 
 		reboot {

+ 1 - 0
arch/mips/boot/dts/brcm/bcm7125.dtsi

@@ -98,6 +98,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7125-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x60c>;
+			little-endian;
 		};
 
 		reboot {

+ 1 - 0
arch/mips/boot/dts/brcm/bcm7346.dtsi

@@ -118,6 +118,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7346-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {

+ 1 - 0
arch/mips/boot/dts/brcm/bcm7358.dtsi

@@ -112,6 +112,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7358-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {

+ 1 - 0
arch/mips/boot/dts/brcm/bcm7360.dtsi

@@ -112,6 +112,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7360-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {

+ 1 - 0
arch/mips/boot/dts/brcm/bcm7362.dtsi

@@ -118,6 +118,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7362-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {

+ 1 - 0
arch/mips/boot/dts/brcm/bcm7420.dtsi

@@ -99,6 +99,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7420-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x60c>;
+			little-endian;
 		};
 
 		reboot {

+ 1 - 0
arch/mips/boot/dts/brcm/bcm7425.dtsi

@@ -100,6 +100,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {

+ 1 - 0
arch/mips/boot/dts/brcm/bcm7435.dtsi

@@ -114,6 +114,7 @@
 		sun_top_ctrl: syscon@404000 {
 			compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
 			reg = <0x404000 0x51c>;
+			little-endian;
 		};
 
 		reboot {

+ 7 - 2
arch/mips/include/asm/elf.h

@@ -227,7 +227,7 @@ struct mips_elf_abiflags_v0 {
 	int __res = 1;							\
 	struct elfhdr *__h = (hdr);					\
 									\
-	if (__h->e_machine != EM_MIPS)					\
+	if (!mips_elf_check_machine(__h))				\
 		__res = 0;						\
 	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
 		__res = 0;						\
@@ -258,7 +258,7 @@ struct mips_elf_abiflags_v0 {
 	int __res = 1;							\
 	struct elfhdr *__h = (hdr);					\
 									\
-	if (__h->e_machine != EM_MIPS)					\
+	if (!mips_elf_check_machine(__h))				\
 		__res = 0;						\
 	if (__h->e_ident[EI_CLASS] != ELFCLASS64)			\
 		__res = 0;						\
@@ -285,6 +285,11 @@ struct mips_elf_abiflags_v0 {
 
 #endif /* !defined(ELF_ARCH) */
 
+#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS)
+
+#define vmcore_elf32_check_arch mips_elf_check_machine
+#define vmcore_elf64_check_arch mips_elf_check_machine
+
 struct mips_abi;
 
 extern struct mips_abi mips_abi;

+ 4 - 0
arch/mips/include/asm/fpu.h

@@ -179,6 +179,10 @@ static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
 		if (save)
 			_save_fp(tsk);
 		__disable_fpu();
+	} else {
+		/* FPU should not have been left enabled with no owner */
+		WARN(read_c0_status() & ST0_CU1,
+		     "Orphaned FPU left enabled");
 	}
 	KSTK_STATUS(tsk) &= ~ST0_CU1;
 	clear_tsk_thread_flag(tsk, TIF_USEDFPU);

+ 2 - 1
arch/mips/include/asm/octeon/octeon-feature.h

@@ -128,7 +128,8 @@ static inline int octeon_has_feature(enum octeon_feature feature)
 	case OCTEON_FEATURE_PCIE:
 		return OCTEON_IS_MODEL(OCTEON_CN56XX)
 			|| OCTEON_IS_MODEL(OCTEON_CN52XX)
-			|| OCTEON_IS_MODEL(OCTEON_CN6XXX);
+			|| OCTEON_IS_MODEL(OCTEON_CN6XXX)
+			|| OCTEON_IS_MODEL(OCTEON_CN7XXX);
 
 	case OCTEON_FEATURE_SRIO:
 		return OCTEON_IS_MODEL(OCTEON_CN63XX)

+ 1 - 1
arch/mips/include/asm/processor.h

@@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count;
  * User space process size: 2GB. This is hardcoded into a few places,
  * so don't change it unless you know what you are doing.
  */
-#define TASK_SIZE	0x7fff8000UL
+#define TASK_SIZE	0x80000000UL
 #endif
 
 #define STACK_TOP_MAX	TASK_SIZE

+ 2 - 2
arch/mips/include/asm/stackframe.h

@@ -289,7 +289,7 @@
 		.set	reorder
 		.set	noat
 		mfc0	a0, CP0_STATUS
-		li	v1, 0xff00
+		li	v1, ST0_CU1 | ST0_IM
 		ori	a0, STATMASK
 		xori	a0, STATMASK
 		mtc0	a0, CP0_STATUS
@@ -330,7 +330,7 @@
 		ori	a0, STATMASK
 		xori	a0, STATMASK
 		mtc0	a0, CP0_STATUS
-		li	v1, 0xff00
+		li	v1, ST0_CU1 | ST0_FR | ST0_IM
 		and	a0, v1
 		LONG_L	v0, PT_STATUS(sp)
 		nor	v1, $0, v1

+ 1 - 3
arch/mips/include/asm/syscall.h

@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
 	/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
 	if ((config_enabled(CONFIG_32BIT) ||
 	    test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
-	    (regs->regs[2] == __NR_syscall)) {
+	    (regs->regs[2] == __NR_syscall))
 		i++;
-		n++;
-	}
 
 	while (n--)
 		ret |= mips_get_syscall_arg(args++, task, regs, i++);

+ 9 - 6
arch/mips/include/uapi/asm/unistd.h

@@ -380,16 +380,17 @@
 #define __NR_userfaultfd		(__NR_Linux + 357)
 #define __NR_membarrier			(__NR_Linux + 358)
 #define __NR_mlock2			(__NR_Linux + 359)
+#define __NR_copy_file_range		(__NR_Linux + 360)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
 */
-#define __NR_Linux_syscalls		359
+#define __NR_Linux_syscalls		360
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		359
+#define __NR_O32_Linux_syscalls		360
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -717,16 +718,17 @@
 #define __NR_userfaultfd		(__NR_Linux + 317)
 #define __NR_membarrier			(__NR_Linux + 318)
 #define __NR_mlock2			(__NR_Linux + 319)
+#define __NR_copy_file_range		(__NR_Linux + 320)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
 */
-#define __NR_Linux_syscalls		319
+#define __NR_Linux_syscalls		320
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		319
+#define __NR_64_Linux_syscalls		320
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1058,15 +1060,16 @@
 #define __NR_userfaultfd		(__NR_Linux + 321)
 #define __NR_membarrier			(__NR_Linux + 322)
 #define __NR_mlock2			(__NR_Linux + 323)
+#define __NR_copy_file_range		(__NR_Linux + 324)
 
 /*
  * Offset of the last N32 flavoured syscall
 */
-#define __NR_Linux_syscalls		323
+#define __NR_Linux_syscalls		324
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		323
+#define __NR_N32_Linux_syscalls		324
 
 #endif /* _UAPI_ASM_UNISTD_H */
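Since C libraries of this era shipped no wrapper for the new call, userspace would reach it through syscall(2) using the ABI-specific number defined above (4360/5320/6324 for o32/n64/n32). A hedged sketch (error handling elided; assumes the installed uapi headers already define __NR_copy_file_range):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <fcntl.h>

    int main(void)
    {
            int in = open("src", O_RDONLY);
            int out = open("dst", O_WRONLY | O_CREAT, 0644);

            /* NULL offsets: use and advance each file's own offset */
            long n = syscall(__NR_copy_file_range, in, NULL, out, NULL,
                             4096, 0);

            return n < 0;
    }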

+ 1 - 1
arch/mips/kernel/binfmt_elfn32.c

@@ -35,7 +35,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 	int __res = 1;							\
 	struct elfhdr *__h = (hdr);					\
 									\
-	if (__h->e_machine != EM_MIPS)					\
+	if (!mips_elf_check_machine(__h))				\
 		__res = 0;						\
 	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
 		__res = 0;						\

+ 1 - 1
arch/mips/kernel/binfmt_elfo32.c

@@ -47,7 +47,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 	int __res = 1;							\
 	struct elfhdr *__h = (hdr);					\
 									\
-	if (__h->e_machine != EM_MIPS)					\
+	if (!mips_elf_check_machine(__h))				\
 		__res = 0;						\
 	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
 		__res = 0;						\

+ 2 - 4
arch/mips/kernel/process.c

@@ -65,12 +65,10 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
 	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
 	status |= KU_USER;
 	regs->cp0_status = status;
+	lose_fpu(0);
+	clear_thread_flag(TIF_MSA_CTX_LIVE);
 	clear_used_math();
-	clear_fpu_owner();
 	init_dsp();
-	clear_thread_flag(TIF_USEDMSA);
-	clear_thread_flag(TIF_MSA_CTX_LIVE);
-	disable_msa();
 	regs->cp0_epc = pc;
 	regs->regs[29] = sp;
 }
+ 1 - 0
arch/mips/kernel/scall32-o32.S

@@ -595,3 +595,4 @@ EXPORT(sys_call_table)
 	PTR	sys_userfaultfd
 	PTR	sys_membarrier
 	PTR	sys_mlock2
+	PTR	sys_copy_file_range		/* 4360 */

+ 1 - 0
arch/mips/kernel/scall64-64.S

@@ -433,4 +433,5 @@ EXPORT(sys_call_table)
 	PTR	sys_userfaultfd
 	PTR	sys_membarrier
 	PTR	sys_mlock2
+	PTR	sys_copy_file_range		/* 5320 */
 	.size	sys_call_table,.-sys_call_table

+ 1 - 0
arch/mips/kernel/scall64-n32.S

@@ -423,4 +423,5 @@ EXPORT(sysn32_call_table)
 	PTR	sys_userfaultfd
 	PTR	sys_membarrier
 	PTR	sys_mlock2
+	PTR	sys_copy_file_range
 	.size	sysn32_call_table,.-sysn32_call_table

+ 1 - 0
arch/mips/kernel/scall64-o32.S

@@ -578,4 +578,5 @@ EXPORT(sys32_call_table)
 	PTR	sys_userfaultfd
 	PTR	sys_membarrier
 	PTR	sys_mlock2
+	PTR	sys_copy_file_range		/* 4360 */
 	.size	sys32_call_table,.-sys32_call_table

+ 1 - 0
arch/mips/kernel/setup.c

@@ -782,6 +782,7 @@ static inline void prefill_possible_map(void) {}
 void __init setup_arch(char **cmdline_p)
 {
 	cpu_probe();
+	mips_cm_probe();
 	prom_init();
 
 	setup_early_fdc_console();

+ 6 - 19
arch/mips/kernel/traps.c

@@ -663,7 +663,7 @@ static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
 	return -1;
 }
 
-static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
 {
 	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 		int rd = (opcode & MM_RS) >> 16;
@@ -1119,11 +1119,12 @@ no_r2_instr:
 	if (get_isa16_mode(regs->cp0_epc)) {
 		unsigned short mmop[2] = { 0 };
 
-		if (unlikely(get_user(mmop[0], epc) < 0))
+		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
 			status = SIGSEGV;
-		if (unlikely(get_user(mmop[1], epc) < 0))
+		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
 			status = SIGSEGV;
-		opcode = (mmop[0] << 16) | mmop[1];
+		opcode = mmop[0];
+		opcode = (opcode << 16) | mmop[1];
 
 		if (status < 0)
 			status = simulate_rdhwr_mm(regs, opcode);
@@ -1369,26 +1370,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 		if (unlikely(compute_return_epc(regs) < 0))
 			break;
 
-		if (get_isa16_mode(regs->cp0_epc)) {
-			unsigned short mmop[2] = { 0 };
-
-			if (unlikely(get_user(mmop[0], epc) < 0))
-				status = SIGSEGV;
-			if (unlikely(get_user(mmop[1], epc) < 0))
-				status = SIGSEGV;
-			opcode = (mmop[0] << 16) | mmop[1];
-
-			if (status < 0)
-				status = simulate_rdhwr_mm(regs, opcode);
-		} else {
+		if (!get_isa16_mode(regs->cp0_epc)) {
 			if (unlikely(get_user(opcode, epc) < 0))
 				status = SIGSEGV;
 
 			if (!cpu_has_llsc && status < 0)
 				status = simulate_llsc(regs, opcode);
-
-			if (status < 0)
-				status = simulate_rdhwr_normal(regs, opcode);
 		}
 
 		if (status < 0)
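Two distinct fixes are visible in these traps.c hunks (a reading of the diff, not additional context): the microMIPS opcode is now fetched as two separate halfwords, casting epc to a u16 pointer and reading offsets 0 and 1 where the old code read the same address twice, and the 32-bit opcode is assembled in a wide variable before shifting, with simulate_rdhwr_mm()'s parameter widened from unsigned short to unsigned int to match. The do_cpu() hunk then drops its duplicated copy of that fetch-and-simulate path rather than fixing it in a second place.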

+ 0 - 10
arch/mips/mm/sc-mips.c

@@ -181,10 +181,6 @@ static int __init mips_sc_probe_cm3(void)
 	return 1;
 }
 
-void __weak platform_early_l2_init(void)
-{
-}
-
 static inline int __init mips_sc_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -194,12 +190,6 @@ static inline int __init mips_sc_probe(void)
 	/* Mark as not present until probe completed */
 	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
 
-	/*
-	 * Do we need some platform specific probing before
-	 * we configure L2?
-	 */
-	platform_early_l2_init();
-
 	if (mips_cm_revision() >= CM_REV_CM3)
 		return mips_sc_probe_cm3();
 

+ 0 - 8
arch/mips/mti-malta/malta-init.c

@@ -293,7 +293,6 @@ mips_pci_controller:
 	console_config();
 #endif
 	/* Early detection of CMP support */
-	mips_cm_probe();
 	mips_cpc_probe();
 
 	if (!register_cps_smp_ops())
@@ -304,10 +303,3 @@ mips_pci_controller:
 		return;
 	register_up_smp_ops();
 }
-
-void platform_early_l2_init(void)
-{
-	/* L2 configuration lives in the CM3 */
-	if (mips_cm_revision() >= CM_REV_CM3)
-		mips_cm_probe();
-}

+ 4 - 4
arch/mips/pci/pci-mt7620.c

@@ -297,12 +297,12 @@ static int mt7620_pci_probe(struct platform_device *pdev)
 		return PTR_ERR(rstpcie0);
 
 	bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res);
-	if (!bridge_base)
-		return -ENOMEM;
+	if (IS_ERR(bridge_base))
+		return PTR_ERR(bridge_base);
 
 	pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res);
-	if (!pcie_base)
-		return -ENOMEM;
+	if (IS_ERR(pcie_base))
+		return PTR_ERR(pcie_base);
 
 	iomem_resource.start = 0;
 	iomem_resource.end = ~0;

+ 1 - 0
arch/x86/Kconfig

@@ -475,6 +475,7 @@ config X86_UV
 	depends on X86_64
 	depends on X86_EXTENDED_PLATFORM
 	depends on NUMA
+	depends on EFI
 	depends on X86_X2APIC
 	depends on PCI
 	---help---

+ 1 - 1
arch/x86/include/asm/processor.h

@@ -766,7 +766,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
  */
-#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
+#define thread_saved_pc(t)	READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);

+ 1 - 1
arch/x86/mm/numa.c

@@ -469,7 +469,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
 {
 	int i, nid;
 	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
-	unsigned long start, end;
+	phys_addr_t start, end;
 	struct memblock_region *r;
 
 	/*

+ 4 - 2
block/blk-core.c

@@ -2455,14 +2455,16 @@ struct request *blk_peek_request(struct request_queue *q)
 
 			rq = NULL;
 			break;
-		} else if (ret == BLKPREP_KILL) {
+		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
+			int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
+
 			rq->cmd_flags |= REQ_QUIET;
 			/*
 			 * Mark this request as started so we don't trigger
 			 * any debug logic in the end I/O path.
 			 */
 			blk_start_request(rq);
-			__blk_end_request_all(rq, -EIO);
+			__blk_end_request_all(rq, err);
 		} else {
 			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 			break;

+ 42 - 38
crypto/algif_skcipher.c

@@ -65,18 +65,10 @@ struct skcipher_async_req {
 	struct skcipher_async_rsgl first_sgl;
 	struct list_head list;
 	struct scatterlist *tsg;
-	char iv[];
+	atomic_t *inflight;
+	struct skcipher_request req;
 };
 
-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
-	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
-	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
-	crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
 		      sizeof(struct scatterlist) - 1)
 
@@ -102,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
 
 static void skcipher_async_cb(struct crypto_async_request *req, int err)
 {
-	struct sock *sk = req->data;
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+	struct skcipher_async_req *sreq = req->data;
 	struct kiocb *iocb = sreq->iocb;
 
-	atomic_dec(&ctx->inflight);
+	atomic_dec(sreq->inflight);
 	skcipher_free_async_sgls(sreq);
-	kfree(req);
+	kzfree(sreq);
 	iocb->ki_complete(iocb, err, err);
 }
 
@@ -306,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct skcipher_ctx *ctx = ask->private;
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
 	unsigned ivsize = crypto_skcipher_ivsize(tfm);
 	struct skcipher_sg_list *sgl;
 	struct af_alg_control con = {};
@@ -509,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
 	struct skcipher_sg_list *sgl;
 	struct scatterlist *sg;
 	struct skcipher_async_req *sreq;
 	struct skcipher_request *req;
 	struct skcipher_async_rsgl *last_rsgl = NULL;
-	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
-	unsigned int reqlen = sizeof(struct skcipher_async_req) +
-				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+	unsigned int txbufs = 0, len = 0, tx_nents;
+	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
 	int err = -ENOMEM;
 	bool mark = false;
+	char *iv;
 
-	lock_sock(sk);
-	req = kmalloc(reqlen, GFP_KERNEL);
-	if (unlikely(!req))
-		goto unlock;
+	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+	if (unlikely(!sreq))
+		goto out;
 
-	sreq = GET_SREQ(req, ctx);
+	req = &sreq->req;
+	iv = (char *)(req + 1) + reqsize;
 	sreq->iocb = msg->msg_iocb;
-	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
 	INIT_LIST_HEAD(&sreq->list);
+	sreq->inflight = &ctx->inflight;
+
+	lock_sock(sk);
+	tx_nents = skcipher_all_sg_nents(ctx);
 	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
-	if (unlikely(!sreq->tsg)) {
-		kfree(req);
+	if (unlikely(!sreq->tsg))
 		goto unlock;
-	}
 	sg_init_table(sreq->tsg, tx_nents);
-	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
-	skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      skcipher_async_cb, sk);
+	memcpy(iv, ctx->iv, ivsize);
+	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+				      skcipher_async_cb, sreq);
 
 	while (iov_iter_count(&msg->msg_iter)) {
 		struct skcipher_async_rsgl *rsgl;
@@ -615,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 		sg_mark_end(sreq->tsg + txbufs - 1);
 
 	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
-				   len, sreq->iv);
+				   len, iv);
 	err = ctx->enc ? crypto_skcipher_encrypt(req) :
 			 crypto_skcipher_decrypt(req);
 	if (err == -EINPROGRESS) {
 		atomic_inc(&ctx->inflight);
 		err = -EIOCBQUEUED;
+		sreq = NULL;
 		goto unlock;
 	}
 free:
 	skcipher_free_async_sgls(sreq);
-	kfree(req);
 unlock:
 	skcipher_wmem_wakeup(sk);
 	release_sock(sk);
+	kzfree(sreq);
+out:
 	return err;
 }
 
@@ -637,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct skcipher_ctx *ctx = ask->private;
-	unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
-		&ctx->req));
+	struct skcipher_tfm *skc = pask->private;
+	struct crypto_skcipher *tfm = skc->skcipher;
+	unsigned bs = crypto_skcipher_blocksize(tfm);
 	struct skcipher_sg_list *sgl;
 	struct scatterlist *sg;
 	int err = -EAGAIN;
@@ -947,7 +950,8 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
 	ask->private = ctx;
 
 	skcipher_request_set_tfm(&ctx->req, skcipher);
-	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+						 CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      af_alg_complete, &ctx->completion);
 
 	sk->sk_destruct = skcipher_sock_destruct;

+ 5 - 1
crypto/crypto_user.c

@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		if (link->dump == NULL)
 			return -EINVAL;
 
+		down_read(&crypto_alg_sem);
 		list_for_each_entry(alg, &crypto_alg_list, cra_list)
 			dump_alloc += CRYPTO_REPORT_MAXSIZE;
 
@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 				.done = link->done,
 				.min_dump_alloc = dump_alloc,
 			};
-			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+			err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
 		}
+		up_read(&crypto_alg_sem);
+
+		return err;
 	}
 
 	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
 	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,

+ 20 - 0
drivers/ata/ahci.c

@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */

+ 1 - 0
drivers/ata/ahci.h

@@ -250,6 +250,7 @@ enum {
 	AHCI_HFLAG_MULTI_MSI		= 0,
 	AHCI_HFLAG_MULTI_MSIX		= 0,
 #endif
+	AHCI_HFLAG_WAKE_BEFORE_STOP	= (1 << 22), /* wake before DMA stop */
 
 	/* ap->flags bits */
 

+ 1 - 0
drivers/ata/ahci_brcmstb.c

@@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
 	if (IS_ERR(hpriv))
 		return PTR_ERR(hpriv);
 	hpriv->plat_data = priv;
+	hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
 
 	brcm_sata_alpm_init(hpriv);
 

+ 24 - 3
drivers/ata/libahci.c

@@ -496,8 +496,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 		}
 	}
 
-	/* fabricate port_map from cap.nr_ports */
-	if (!port_map) {
+	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+	if (!port_map && vers < 0x10300) {
 		port_map = (1 << ahci_nr_ports(cap)) - 1;
 		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
 
@@ -593,8 +593,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine);
 int ahci_stop_engine(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
 	u32 tmp;
 
+	/*
+	 * On some controllers, stopping a port's DMA engine while the port
+	 * is in ALPM state (partial or slumber) results in failures on
+	 * subsequent DMA engine starts.  For those controllers, put the
+	 * port back in active state before stopping its DMA engine.
+	 */
+	if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
+	    (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
+	    ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
+		dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
+		return -EIO;
+	}
+
 	tmp = readl(port_mmio + PORT_CMD);
 
 	/* check if the HBA is idle */
@@ -689,6 +703,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 	void __iomem *port_mmio = ahci_port_base(ap);
 
 	if (policy != ATA_LPM_MAX_POWER) {
+		/* wakeup flag only applies to the max power policy */
+		hints &= ~ATA_LPM_WAKE_ONLY;
+
 		/*
 		 * Disable interrupts on Phy Ready. This keeps us from
 		 * getting woken up due to spurious phy ready
@@ -704,7 +721,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 		u32 cmd = readl(port_mmio + PORT_CMD);
 
 		if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
-			cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
+			if (!(hints & ATA_LPM_WAKE_ONLY))
+				cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
 			cmd |= PORT_CMD_ICC_ACTIVE;
 
 			writel(cmd, port_mmio + PORT_CMD);
@@ -712,6 +730,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 
 			/* wait 10ms to be sure we've come out of LPM state */
 			ata_msleep(ap, 10);
+
+			if (hints & ATA_LPM_WAKE_ONLY)
+				return 0;
 		} else {
 			cmd |= PORT_CMD_ALPE;
 			if (policy == ATA_LPM_MIN_POWER)

+ 1 - 0
drivers/ata/libata-core.c

@@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
 	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
+	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
 	/* Odd clown on sil3726/4726 PMPs */
 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
 

+ 13 - 22
drivers/ata/libata-sff.c

@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 {
 	struct ata_port *ap = qc->ap;
-	unsigned long flags;
 
 	if (ap->ops->error_handler) {
 		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
-
 			/* EH might have kicked in while host lock is
 			 * released.
 			 */
@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 				} else
 					ata_port_freeze(ap);
 			}
-
-			spin_unlock_irqrestore(ap->lock, flags);
 		} else {
 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
 				ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@
 		}
 	} else {
 		if (in_wq) {
-			spin_lock_irqsave(ap->lock, flags);
 			ata_sff_irq_on(ap);
 			ata_qc_complete(qc);
-			spin_unlock_irqrestore(ap->lock, flags);
 		} else
 			ata_qc_complete(qc);
 	}
@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 {
 	struct ata_link *link = qc->dev->link;
 	struct ata_eh_info *ehi = &link->eh_info;
-	unsigned long flags = 0;
 	int poll_next;
 
+	lockdep_assert_held(ap->lock);
+
 	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 
 	/* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@ fsm_start:
 			}
 		}
 
-		/* Send the CDB (atapi) or the first data block (ata pio out).
-		 * During the state transition, interrupt handler shouldn't
-		 * be invoked before the data transfer is complete and
-		 * hsm_task_state is changed. Hence, the following locking.
-		 */
-		if (in_wq)
-			spin_lock_irqsave(ap->lock, flags);
-
 		if (qc->tf.protocol == ATA_PROT_PIO) {
 			/* PIO data out protocol.
 			 * send first data block.
@@ -1135,9 +1121,6 @@
 			/* send CDB */
 			atapi_send_cdb(ap, qc);
 
-		if (in_wq)
-			spin_unlock_irqrestore(ap->lock, flags);
-
 		/* if polling, ata_sff_pio_task() handles the rest.
 		 * otherwise, interrupt handler takes over from here.
 		 */
@@ -1296,7 +1279,8 @@ fsm_start:
 		break;
 	default:
 		poll_next = 0;
-		BUG();
+		WARN(true, "ata%d: SFF host state machine in invalid state %d",
+		     ap->print_id, ap->hsm_task_state);
 	}
 
 	return poll_next;
@@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work)
 	u8 status;
 	int poll_next;
 
+	spin_lock_irq(ap->lock);
+
 	BUG_ON(ap->sff_pio_task_link == NULL);
 	/* qc can be NULL if timeout occurred */
 	qc = ata_qc_from_tag(ap, link->active_tag);
 	if (!qc) {
 		ap->sff_pio_task_link = NULL;
-		return;
+		goto out_unlock;
 	}
 
 fsm_start:
@@ -1381,11 +1367,14 @@ fsm_start:
 	 */
 	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
 	if (status & ATA_BUSY) {
+		spin_unlock_irq(ap->lock);
 		ata_msleep(ap, 2);
+		spin_lock_irq(ap->lock);
+
 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
 		if (status & ATA_BUSY) {
 			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
-			return;
+			goto out_unlock;
 		}
 	}
 
@@ -1402,6 +1391,8 @@
 	 */
 	if (poll_next)
 		goto fsm_start;
+out_unlock:
+	spin_unlock_irq(ap->lock);
 }
 
 /**

+ 28 - 21
drivers/base/component.c

@@ -206,6 +206,8 @@ static void component_match_release(struct device *master,
 		if (mc->release)
 			mc->release(master, mc->data);
 	}
+
+	kfree(match->compare);
 }
 
 static void devm_component_match_release(struct device *dev, void *res)
@@ -221,14 +223,14 @@ static int component_match_realloc(struct device *dev,
 	if (match->alloc == num)
 		return 0;
 
-	new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL);
+	new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 
 	if (match->compare) {
 		memcpy(new, match->compare, sizeof(*new) *
 					    min(match->num, num));
-		devm_kfree(dev, match->compare);
+		kfree(match->compare);
 	}
 	match->compare = new;
 	match->alloc = num;
@@ -283,6 +285,24 @@ void component_match_add_release(struct device *master,
 }
 EXPORT_SYMBOL(component_match_add_release);
 
+static void free_master(struct master *master)
+{
+	struct component_match *match = master->match;
+	int i;
+
+	list_del(&master->node);
+
+	if (match) {
+		for (i = 0; i < match->num; i++) {
+			struct component *c = match->compare[i].component;
+			if (c)
+				c->master = NULL;
+		}
+	}
+
+	kfree(master);
+}
+
 int component_master_add_with_match(struct device *dev,
 	const struct component_master_ops *ops,
 	struct component_match *match)
@@ -309,11 +329,9 @@ int component_master_add_with_match(struct device *dev,
 
 	ret = try_to_bring_up_master(master, NULL);
 
-	if (ret < 0) {
-		/* Delete off the list if we weren't successful */
-		list_del(&master->node);
-		kfree(master);
-	}
+	if (ret < 0)
+		free_master(master);
+
 	mutex_unlock(&component_mutex);
 
 	return ret < 0 ? ret : 0;
@@ -324,25 +342,12 @@ void component_master_del(struct device *dev,
 	const struct component_master_ops *ops)
 {
 	struct master *master;
-	int i;
 
 	mutex_lock(&component_mutex);
 	master = __master_find(dev, ops);
 	if (master) {
-		struct component_match *match = master->match;
-
 		take_down_master(master);
-
-		list_del(&master->node);
-
-		if (match) {
-			for (i = 0; i < match->num; i++) {
-				struct component *c = match->compare[i].component;
-				if (c)
-					c->master = NULL;
-			}
-		}
-		kfree(master);
+		free_master(master);
 	}
 	mutex_unlock(&component_mutex);
 }
@@ -486,6 +491,8 @@ int component_add(struct device *dev, const struct component_ops *ops)
 
 	ret = try_to_bring_up_masters(component);
 	if (ret < 0) {
+		if (component->master)
+			remove_component(component->master, component);
 		list_del(&component->node);
 
 		kfree(component);

+ 8 - 8
drivers/base/regmap/regmap-mmio.c

@@ -133,17 +133,17 @@ static int regmap_mmio_gather_write(void *context,
 	while (val_size) {
 		switch (ctx->val_bytes) {
 		case 1:
-			__raw_writeb(*(u8 *)val, ctx->regs + offset);
+			writeb(*(u8 *)val, ctx->regs + offset);
 			break;
 		case 2:
-			__raw_writew(*(u16 *)val, ctx->regs + offset);
+			writew(*(u16 *)val, ctx->regs + offset);
 			break;
 		case 4:
-			__raw_writel(*(u32 *)val, ctx->regs + offset);
+			writel(*(u32 *)val, ctx->regs + offset);
 			break;
 #ifdef CONFIG_64BIT
 		case 8:
-			__raw_writeq(*(u64 *)val, ctx->regs + offset);
+			writeq(*(u64 *)val, ctx->regs + offset);
 			break;
 #endif
 		default:
@@ -193,17 +193,17 @@ static int regmap_mmio_read(void *context,
 	while (val_size) {
 		switch (ctx->val_bytes) {
 		case 1:
-			*(u8 *)val = __raw_readb(ctx->regs + offset);
+			*(u8 *)val = readb(ctx->regs + offset);
 			break;
 		case 2:
-			*(u16 *)val = __raw_readw(ctx->regs + offset);
+			*(u16 *)val = readw(ctx->regs + offset);
 			break;
 		case 4:
-			*(u32 *)val = __raw_readl(ctx->regs + offset);
+			*(u32 *)val = readl(ctx->regs + offset);
 			break;
 #ifdef CONFIG_64BIT
 		case 8:
-			*(u64 *)val = __raw_readq(ctx->regs + offset);
+			*(u64 *)val = readq(ctx->regs + offset);
 			break;
 #endif
 		default:

+ 12 - 11
drivers/crypto/atmel-sha.c

@@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
 	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
 			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
 
-	clk_disable_unprepare(dd->iclk);
+	clk_disable(dd->iclk);
 
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
@@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
 {
 	int err;
 
-	err = clk_prepare_enable(dd->iclk);
+	err = clk_enable(dd->iclk);
 	if (err)
 		return err;
 
@@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
 	dev_info(dd->dev,
 			"version: 0x%x\n", dd->hw_version);
 
-	clk_disable_unprepare(dd->iclk);
+	clk_disable(dd->iclk);
 }
 
 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
 		goto res_err;
 	}
 
+	err = clk_prepare(sha_dd->iclk);
+	if (err)
+		goto res_err;
+
 	atmel_sha_hw_version_init(sha_dd);
 
 	atmel_sha_get_cap(sha_dd);
@@ -1421,12 +1425,12 @@
 			if (IS_ERR(pdata)) {
 				dev_err(&pdev->dev, "platform data not available\n");
 				err = PTR_ERR(pdata);
-				goto res_err;
+				goto iclk_unprepare;
 			}
 		}
 		if (!pdata->dma_slave) {
 			err = -ENXIO;
-			goto res_err;
+			goto iclk_unprepare;
 		}
 		err = atmel_sha_dma_init(sha_dd, pdata);
 		if (err)
@@ -1457,6 +1461,8 @@ err_algs:
 	if (sha_dd->caps.has_dma)
 		atmel_sha_dma_cleanup(sha_dd);
 err_sha_dma:
+iclk_unprepare:
+	clk_unprepare(sha_dd->iclk);
 res_err:
 	tasklet_kill(&sha_dd->done_task);
 sha_dd_err:
@@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
 	if (sha_dd->caps.has_dma)
 		atmel_sha_dma_cleanup(sha_dd);
 
-	iounmap(sha_dd->io_base);
-
-	clk_put(sha_dd->iclk);
-
-	if (sha_dd->irq >= 0)
-		free_irq(sha_dd->irq, sha_dd);
+	clk_unprepare(sha_dd->iclk);
 
 	return 0;
 }

+ 1 - 1
drivers/crypto/marvell/cesa.c

@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
 		return -ENOMEM;
 
 	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
-	if (!dma->cache_pool)
+	if (!dma->padding_pool)
 		return -ENOMEM;
 
 	cesa->dma = dma;

+ 3 - 2
drivers/gpio/gpio-altera.c

@@ -312,8 +312,8 @@ static int altera_gpio_probe(struct platform_device *pdev)
 		handle_simple_irq, IRQ_TYPE_NONE);
 
 	if (ret) {
-		dev_info(&pdev->dev, "could not add irqchip\n");
-		return ret;
+		dev_err(&pdev->dev, "could not add irqchip\n");
+		goto teardown;
 	}
 
 	gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,
@@ -326,6 +326,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
 skip_irq:
 	return 0;
 teardown:
+	of_mm_gpiochip_remove(&altera_gc->mmchip);
 	pr_err("%s: registration failed with status %d\n",
 		node->full_name, ret);
 

+ 4 - 3
drivers/gpio/gpio-davinci.c

@@ -195,7 +195,7 @@ static int davinci_gpio_of_xlate(struct gpio_chip *gc,
 static int davinci_gpio_probe(struct platform_device *pdev)
 {
 	int i, base;
-	unsigned ngpio;
+	unsigned ngpio, nbank;
 	struct davinci_gpio_controller *chips;
 	struct davinci_gpio_platform_data *pdata;
 	struct davinci_gpio_regs __iomem *regs;
@@ -224,8 +224,9 @@
 	if (WARN_ON(ARCH_NR_GPIOS < ngpio))
 		ngpio = ARCH_NR_GPIOS;
 
+	nbank = DIV_ROUND_UP(ngpio, 32);
 	chips = devm_kzalloc(dev,
-			     ngpio * sizeof(struct davinci_gpio_controller),
+			     nbank * sizeof(struct davinci_gpio_controller),
 			     GFP_KERNEL);
 	if (!chips)
 		return -ENOMEM;
@@ -511,7 +512,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
 			return irq;
 		}
 
-		irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0,
+		irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
 							&davinci_gpio_irq_ops,
 							chips);
 		if (!irq_domain) {

+ 4 - 43
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -87,6 +87,8 @@ extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_enable_semaphores;
 extern int amdgpu_powerplay;
+extern unsigned amdgpu_pcie_gen_cap;
+extern unsigned amdgpu_pcie_lane_cap;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -132,47 +134,6 @@ extern int amdgpu_powerplay;
 #define AMDGPU_RESET_VCE			(1 << 13)
 #define AMDGPU_RESET_VCE1			(1 << 14)
 
-/* CG block flags */
-#define AMDGPU_CG_BLOCK_GFX			(1 << 0)
-#define AMDGPU_CG_BLOCK_MC			(1 << 1)
-#define AMDGPU_CG_BLOCK_SDMA			(1 << 2)
-#define AMDGPU_CG_BLOCK_UVD			(1 << 3)
-#define AMDGPU_CG_BLOCK_VCE			(1 << 4)
-#define AMDGPU_CG_BLOCK_HDP			(1 << 5)
-#define AMDGPU_CG_BLOCK_BIF			(1 << 6)
-
-/* CG flags */
-#define AMDGPU_CG_SUPPORT_GFX_MGCG		(1 << 0)
-#define AMDGPU_CG_SUPPORT_GFX_MGLS		(1 << 1)
-#define AMDGPU_CG_SUPPORT_GFX_CGCG		(1 << 2)
-#define AMDGPU_CG_SUPPORT_GFX_CGLS		(1 << 3)
-#define AMDGPU_CG_SUPPORT_GFX_CGTS		(1 << 4)
-#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
-#define AMDGPU_CG_SUPPORT_GFX_CP_LS		(1 << 6)
-#define AMDGPU_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
-#define AMDGPU_CG_SUPPORT_MC_LS			(1 << 8)
-#define AMDGPU_CG_SUPPORT_MC_MGCG		(1 << 9)
-#define AMDGPU_CG_SUPPORT_SDMA_LS		(1 << 10)
-#define AMDGPU_CG_SUPPORT_SDMA_MGCG		(1 << 11)
-#define AMDGPU_CG_SUPPORT_BIF_LS		(1 << 12)
-#define AMDGPU_CG_SUPPORT_UVD_MGCG		(1 << 13)
-#define AMDGPU_CG_SUPPORT_VCE_MGCG		(1 << 14)
-#define AMDGPU_CG_SUPPORT_HDP_LS		(1 << 15)
-#define AMDGPU_CG_SUPPORT_HDP_MGCG		(1 << 16)
-
-/* PG flags */
-#define AMDGPU_PG_SUPPORT_GFX_PG		(1 << 0)
-#define AMDGPU_PG_SUPPORT_GFX_SMG		(1 << 1)
-#define AMDGPU_PG_SUPPORT_GFX_DMG		(1 << 2)
-#define AMDGPU_PG_SUPPORT_UVD			(1 << 3)
-#define AMDGPU_PG_SUPPORT_VCE			(1 << 4)
-#define AMDGPU_PG_SUPPORT_CP			(1 << 5)
-#define AMDGPU_PG_SUPPORT_GDS			(1 << 6)
-#define AMDGPU_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
-#define AMDGPU_PG_SUPPORT_SDMA			(1 << 8)
-#define AMDGPU_PG_SUPPORT_ACP			(1 << 9)
-#define AMDGPU_PG_SUPPORT_SAMU			(1 << 10)
-
 /* GFX current status */
 #define AMDGPU_GFX_NORMAL_MODE			0x00000000L
 #define AMDGPU_GFX_SAFE_MODE			0x00000001L
@@ -606,8 +567,6 @@ struct amdgpu_sa_manager {
 	uint32_t		align;
 };
 
-struct amdgpu_sa_bo;
-
 /* sub-allocation buffer */
 struct amdgpu_sa_bo {
 	struct list_head		olist;
@@ -2360,6 +2319,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 				     uint32_t flags);
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+				  unsigned long end);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);

+ 6 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c

@@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
 	case CGS_SYSTEM_INFO_PCIE_MLW:
 		sys_info->value = adev->pm.pcie_mlw_mask;
 		break;
+	case CGS_SYSTEM_INFO_CG_FLAGS:
+		sys_info->value = adev->cg_flags;
+		break;
+	case CGS_SYSTEM_INFO_PG_FLAGS:
+		sys_info->value = adev->pg_flags;
+		break;
 	default:
 		return -ENODEV;
 	}

+ 91 - 69
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -1795,15 +1795,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	}
 
 	/* post card */
-	amdgpu_atom_asic_init(adev->mode_info.atom_context);
+	if (!amdgpu_card_posted(adev))
+		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 
 	r = amdgpu_resume(adev);
+	if (r)
+		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
 
 	amdgpu_fence_driver_resume(adev);
 
-	r = amdgpu_ib_ring_tests(adev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
+	if (resume) {
+		r = amdgpu_ib_ring_tests(adev);
+		if (r)
+			DRM_ERROR("ib ring test failed (%d).\n", r);
+	}
 
 	r = amdgpu_late_init(adev);
 	if (r)
@@ -1933,80 +1938,97 @@ retry:
 	return r;
 }
 
+#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007  /* gen: chipset 1/2, asic 1/2/3 */
+#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
+
 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
 {
 	u32 mask;
 	int ret;
 
-	if (pci_is_root_bus(adev->pdev->bus))
-		return;
+	if (amdgpu_pcie_gen_cap)
+		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
 
-	if (amdgpu_pcie_gen2 == 0)
-		return;
+	if (amdgpu_pcie_lane_cap)
+		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
 
-	if (adev->flags & AMD_IS_APU)
+	/* covers APUs as well */
+	if (pci_is_root_bus(adev->pdev->bus)) {
+		if (adev->pm.pcie_gen_mask == 0)
+			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+		if (adev->pm.pcie_mlw_mask == 0)
+			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
 		return;
+	}
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (!ret) {
-		adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
-					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
-					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
-		if (mask & DRM_PCIE_SPEED_25)
-			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
-		if (mask & DRM_PCIE_SPEED_50)
-			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
-		if (mask & DRM_PCIE_SPEED_80)
-			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
-	}
-	ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
-	if (!ret) {
-		switch (mask) {
-		case 32:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 16:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 12:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 8:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 4:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 2:
-			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
-						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
-			break;
-		case 1:
-			adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
-			break;
-		default:
-			break;
+	if (adev->pm.pcie_gen_mask == 0) {
+		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+		if (!ret) {
+			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+
+			if (mask & DRM_PCIE_SPEED_25)
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+			if (mask & DRM_PCIE_SPEED_50)
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
+			if (mask & DRM_PCIE_SPEED_80)
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
+		} else {
+			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+		}
+	}
+	if (adev->pm.pcie_mlw_mask == 0) {
+		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
+		if (!ret) {
+			switch (mask) {
+			case 32:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 16:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 12:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 8:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 4:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 2:
+				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+				break;
+			case 1:
+				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+				break;
+			default:
+				break;
+			}
+		} else {
+			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
 		}
 	}
 }

+ 8 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -83,6 +83,8 @@ int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_enable_semaphores = 0;
 int amdgpu_powerplay = -1;
+unsigned amdgpu_pcie_gen_cap = 0;
+unsigned amdgpu_pcie_lane_cap = 0;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -170,6 +172,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 =
 module_param_named(powerplay, amdgpu_powerplay, int, 0444);
 #endif
 
+MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
+module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
+
+MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
+module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+
 static struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */

+ 2 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c

@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 
 		list_for_each_entry(bo, &node->bos, mn_list) {
 
-			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
+							  end))
 				continue;
 
 			r = amdgpu_bo_reserve(bo, true);

+ 4 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c

@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 
 		for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
 			if (fences[i])
-				fences[count++] = fences[i];
+				fences[count++] = fence_get(fences[i]);
 
 		if (count) {
 			spin_unlock(&sa_manager->wq.lock);
 			t = fence_wait_any_timeout(fences, count, false,
 						   MAX_SCHEDULE_TIMEOUT);
+			for (i = 0; i < count; ++i)
+				fence_put(fences[i]);
+
 			r = (t > 0) ? 0 : t;
 			spin_lock(&sa_manager->wq.lock);
 		} else {

+ 19 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
 	return !!gtt->userptr;
 }
 
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+				  unsigned long end)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	unsigned long size;
+
+	if (gtt == NULL)
+		return false;
+
+	if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+		return false;
+
+	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+	if (gtt->userptr > end || gtt->userptr + size <= start)
+		return false;
+
+	return true;
+}
+
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;

+ 5 - 6
drivers/gpu/drm/amd/amdgpu/ci_dpm.c

@@ -31,6 +31,7 @@
 #include "ci_dpm.h"
 #include "gfx_v7_0.h"
 #include "atom.h"
+#include "amd_pcie.h"
 #include <linux/seq_file.h>
 
 #include "smu/smu_7_0_1_d.h"
@@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev)
 	u8 frev, crev;
 	struct ci_power_info *pi;
 	int ret;
-	u32 mask;
 
 	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
 	if (pi == NULL)
 		return -ENOMEM;
 	adev->pm.dpm.priv = pi;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret)
-		pi->sys_pcie_mask = 0;
-	else
-		pi->sys_pcie_mask = mask;
+	pi->sys_pcie_mask =
+		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+
 	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
 
 	pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;

+ 80 - 77
drivers/gpu/drm/amd/amdgpu/cik.c

@@ -1762,6 +1762,9 @@ static void cik_program_aspm(struct amdgpu_device *adev)
 	if (amdgpu_aspm == 0)
 		return;
 
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
 	/* XXX double check APUs */
 	if (adev->flags & AMD_IS_APU)
 		return;
@@ -2332,72 +2335,72 @@ static int cik_common_early_init(void *handle)
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
 		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_MC_LS |
-			AMDGPU_CG_SUPPORT_MC_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
 	case CHIP_HAWAII:
 		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_MC_LS |
-			AMDGPU_CG_SUPPORT_MC_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
 		adev->pg_flags = 0;
 		adev->external_rev_id = 0x28;
 		break;
 	case CHIP_KAVERI:
 		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
 		adev->pg_flags =
-			/*AMDGPU_PG_SUPPORT_GFX_PG |
-			  AMDGPU_PG_SUPPORT_GFX_SMG |
-			  AMDGPU_PG_SUPPORT_GFX_DMG |*/
-			AMDGPU_PG_SUPPORT_UVD |
-			/*AMDGPU_PG_SUPPORT_VCE |
-			  AMDGPU_PG_SUPPORT_CP |
-			  AMDGPU_PG_SUPPORT_GDS |
-			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
-			  AMDGPU_PG_SUPPORT_ACP |
-			  AMDGPU_PG_SUPPORT_SAMU |*/
+			/*AMD_PG_SUPPORT_GFX_PG |
+			  AMD_PG_SUPPORT_GFX_SMG |
+			  AMD_PG_SUPPORT_GFX_DMG |*/
+			AMD_PG_SUPPORT_UVD |
+			/*AMD_PG_SUPPORT_VCE |
+			  AMD_PG_SUPPORT_CP |
+			  AMD_PG_SUPPORT_GDS |
+			  AMD_PG_SUPPORT_RLC_SMU_HS |
+			  AMD_PG_SUPPORT_ACP |
+			  AMD_PG_SUPPORT_SAMU |*/
 			0;
 		if (adev->pdev->device == 0x1312 ||
 			adev->pdev->device == 0x1316 ||
@@ -2409,29 +2412,29 @@ static int cik_common_early_init(void *handle)
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 		adev->cg_flags =
-			AMDGPU_CG_SUPPORT_GFX_MGCG |
-			AMDGPU_CG_SUPPORT_GFX_MGLS |
-			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
-			AMDGPU_CG_SUPPORT_GFX_CGLS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS |
-			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
-			AMDGPU_CG_SUPPORT_GFX_CP_LS |
-			AMDGPU_CG_SUPPORT_SDMA_MGCG |
-			AMDGPU_CG_SUPPORT_SDMA_LS |
-			AMDGPU_CG_SUPPORT_BIF_LS |
-			AMDGPU_CG_SUPPORT_VCE_MGCG |
-			AMDGPU_CG_SUPPORT_UVD_MGCG |
-			AMDGPU_CG_SUPPORT_HDP_LS |
-			AMDGPU_CG_SUPPORT_HDP_MGCG;
+			AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			/*AMD_CG_SUPPORT_GFX_CGCG |*/
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_CGTS |
+			AMD_CG_SUPPORT_GFX_CGTS_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_HDP_MGCG;
 		adev->pg_flags =
-			/*AMDGPU_PG_SUPPORT_GFX_PG |
-			  AMDGPU_PG_SUPPORT_GFX_SMG | */
-			AMDGPU_PG_SUPPORT_UVD |
-			/*AMDGPU_PG_SUPPORT_VCE |
-			  AMDGPU_PG_SUPPORT_CP |
-			  AMDGPU_PG_SUPPORT_GDS |
-			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
-			  AMDGPU_PG_SUPPORT_SAMU |*/
+			/*AMD_PG_SUPPORT_GFX_PG |
+			  AMD_PG_SUPPORT_GFX_SMG | */
+			AMD_PG_SUPPORT_UVD |
+			/*AMD_PG_SUPPORT_VCE |
+			  AMD_PG_SUPPORT_CP |
+			  AMD_PG_SUPPORT_GDS |
+			  AMD_PG_SUPPORT_RLC_SMU_HS |
+			  AMD_PG_SUPPORT_SAMU |*/
 			0;
 		if (adev->asic_type == CHIP_KABINI) {
 			if (adev->rev_id == 0)

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -885,7 +885,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 {
 	u32 orig, data;
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
 		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
 		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
 	} else {
@@ -906,7 +906,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
 {
 	u32 orig, data;
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
 		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
 		data |= 0x100;
 		if (orig != data)

+ 3 - 3
drivers/gpu/drm/amd/amdgpu/cz_dpm.c

@@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev)
 	pi->gfx_pg_threshold = 500;
 	pi->gfx_pg_threshold = 500;
 	pi->caps_fps = true;
 	pi->caps_fps = true;
 	/* uvd */
 	/* uvd */
-	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
 	pi->caps_uvd_dpm = true;
 	pi->caps_uvd_dpm = true;
 	/* vce */
 	/* vce */
-	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
+	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
 	pi->caps_vce_dpm = true;
 	pi->caps_vce_dpm = true;
 	/* acp */
 	/* acp */
-	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
 	pi->caps_acp_dpm = true;
 	pi->caps_acp_dpm = true;
 
 
 	pi->caps_stable_power_state = false;
 	pi->caps_stable_power_state = false;

+ 35 - 35
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -4109,7 +4109,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
 
 
 	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
 	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
 
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
 		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
 		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
 
 
 		tmp = gfx_v7_0_halt_rlc(adev);
 		tmp = gfx_v7_0_halt_rlc(adev);
@@ -4147,9 +4147,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
 {
 {
 	u32 data, orig, tmp = 0;
 	u32 data, orig, tmp = 0;
 
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) {
-		if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) {
-			if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
 				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
 				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
 				if (orig != data)
 				if (orig != data)
@@ -4176,14 +4176,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
 
 
 		gfx_v7_0_update_rlc(adev, tmp);
 		gfx_v7_0_update_rlc(adev, tmp);
 
 
-		if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) {
+		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
 			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
 			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
 			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
 			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
 			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
 			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
 			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
 			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
 			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
 			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
-			if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) &&
-			    (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS))
+			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
+			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
 				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
 				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
 			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
 			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
 			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
 			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
@@ -4249,7 +4249,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
 	u32 data, orig;
 	u32 data, orig;
 
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
 	else
 	else
 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
@@ -4263,7 +4263,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
 	u32 data, orig;
 	u32 data, orig;
 
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
 	else
 	else
 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
@@ -4276,7 +4276,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
 	u32 data, orig;
 	u32 data, orig;
 
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
 		data &= ~0x8000;
 		data &= ~0x8000;
 	else
 	else
 		data |= 0x8000;
 		data |= 0x8000;
@@ -4289,7 +4289,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
 	u32 data, orig;
 	u32 data, orig;
 
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
 		data &= ~0x2000;
 		data &= ~0x2000;
 	else
 	else
 		data |= 0x2000;
 		data |= 0x2000;
@@ -4370,7 +4370,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
 {
 {
 	u32 data, orig;
 	u32 data, orig;
 
 
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) {
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
 		orig = data = RREG32(mmRLC_PG_CNTL);
 		orig = data = RREG32(mmRLC_PG_CNTL);
 		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
 		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
 		if (orig != data)
 		if (orig != data)
@@ -4442,7 +4442,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
 	u32 data, orig;
 	u32 data, orig;
 
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
 		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
 		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
 	else
 	else
 		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
 		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
@@ -4456,7 +4456,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
 	u32 data, orig;
 	u32 data, orig;
 
 
 	orig = data = RREG32(mmRLC_PG_CNTL);
 	orig = data = RREG32(mmRLC_PG_CNTL);
-	if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG))
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
 		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
 		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
 	else
 	else
 		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
 		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
@@ -4623,15 +4623,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
 
 
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
 {
 {
-	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
-			      AMDGPU_PG_SUPPORT_GFX_SMG |
-			      AMDGPU_PG_SUPPORT_GFX_DMG |
-			      AMDGPU_PG_SUPPORT_CP |
-			      AMDGPU_PG_SUPPORT_GDS |
-			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
 		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
 		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
 		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
 		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
-		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
 			gfx_v7_0_init_gfx_cgpg(adev);
 			gfx_v7_0_init_gfx_cgpg(adev);
 			gfx_v7_0_enable_cp_pg(adev, true);
 			gfx_v7_0_enable_cp_pg(adev, true);
 			gfx_v7_0_enable_gds_pg(adev, true);
 			gfx_v7_0_enable_gds_pg(adev, true);
@@ -4643,14 +4643,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
 
 
 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
 {
 {
-	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
-			      AMDGPU_PG_SUPPORT_GFX_SMG |
-			      AMDGPU_PG_SUPPORT_GFX_DMG |
-			      AMDGPU_PG_SUPPORT_CP |
-			      AMDGPU_PG_SUPPORT_GDS |
-			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
 		gfx_v7_0_update_gfx_pg(adev, false);
 		gfx_v7_0_update_gfx_pg(adev, false);
-		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
 			gfx_v7_0_enable_cp_pg(adev, false);
 			gfx_v7_0_enable_cp_pg(adev, false);
 			gfx_v7_0_enable_gds_pg(adev, false);
 			gfx_v7_0_enable_gds_pg(adev, false);
 		}
 		}
@@ -5527,14 +5527,14 @@ static int gfx_v7_0_set_powergating_state(void *handle,
 	if (state == AMD_PG_STATE_GATE)
 	if (state == AMD_PG_STATE_GATE)
 		gate = true;
 		gate = true;
 
 
-	if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
-			      AMDGPU_PG_SUPPORT_GFX_SMG |
-			      AMDGPU_PG_SUPPORT_GFX_DMG |
-			      AMDGPU_PG_SUPPORT_CP |
-			      AMDGPU_PG_SUPPORT_GDS |
-			      AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
 		gfx_v7_0_update_gfx_pg(adev, gate);
 		gfx_v7_0_update_gfx_pg(adev, gate);
-		if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
 			gfx_v7_0_enable_cp_pg(adev, gate);
 			gfx_v7_0_enable_cp_pg(adev, gate);
 			gfx_v7_0_enable_gds_pg(adev, gate);
 			gfx_v7_0_enable_gds_pg(adev, gate);
 		}
 		}

+ 5 - 5
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -792,7 +792,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
 
 
 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 		orig = data = RREG32(mc_cg_registers[i]);
 		orig = data = RREG32(mc_cg_registers[i]);
-		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
 			data |= mc_cg_ls_en[i];
 			data |= mc_cg_ls_en[i];
 		else
 		else
 			data &= ~mc_cg_ls_en[i];
 			data &= ~mc_cg_ls_en[i];
@@ -809,7 +809,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
 
 
 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
 		orig = data = RREG32(mc_cg_registers[i]);
 		orig = data = RREG32(mc_cg_registers[i]);
-		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
 			data |= mc_cg_en[i];
 			data |= mc_cg_en[i];
 		else
 		else
 			data &= ~mc_cg_en[i];
 			data &= ~mc_cg_en[i];
@@ -825,7 +825,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
 
 
 	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
 	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
 
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
 		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
 		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
 		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
 		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
 		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
 		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -848,7 +848,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
 
 
 	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
 	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
 
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
 	else
 	else
 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -864,7 +864,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
 
 
 	orig = data = RREG32(mmHDP_MEM_POWER_LS);
 	orig = data = RREG32(mmHDP_MEM_POWER_LS);
 
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
 	else
 	else
 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

+ 4 - 4
drivers/gpu/drm/amd/amdgpu/kv_dpm.c

@@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
 	pi->voltage_drop_t = 0;
 	pi->voltage_drop_t = 0;
 	pi->caps_sclk_throttle_low_notification = false;
 	pi->caps_sclk_throttle_low_notification = false;
 	pi->caps_fps = false; /* true? */
 	pi->caps_fps = false; /* true? */
-	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
 	pi->caps_uvd_dpm = true;
 	pi->caps_uvd_dpm = true;
-	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
-	pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false;
-	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
+	pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
+	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
 	pi->caps_stable_p_state = false;
 	pi->caps_stable_p_state = false;
 
 
 	ret = kv_parse_sys_info_table(adev);
 	ret = kv_parse_sys_info_table(adev);

+ 8 - 2
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c

@@ -611,7 +611,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
 {
 {
 	u32 orig, data;
 	u32 orig, data;
 
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
 		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
 		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
 		data = 0xfff;
 		data = 0xfff;
 		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
 		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
@@ -830,6 +830,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle,
 	bool gate = false;
 	bool gate = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+		return 0;
+
 	if (state == AMD_CG_STATE_GATE)
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
 		gate = true;
 
 
@@ -848,7 +851,10 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 	 * revisit this when there is a cleaner line between
 	 * revisit this when there is a cleaner line between
 	 * the smc and the hw blocks
 	 * the smc and the hw blocks
 	 */
 	 */
-	 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+		return 0;
 
 
 	if (state == AMD_PG_STATE_GATE) {
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v4_2_stop(adev);
 		uvd_v4_2_stop(adev);

+ 8 - 0
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c

@@ -774,6 +774,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
 static int uvd_v5_0_set_clockgating_state(void *handle,
 static int uvd_v5_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 					  enum amd_clockgating_state state)
 {
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+		return 0;
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -789,6 +794,9 @@ static int uvd_v5_0_set_powergating_state(void *handle,
 	 */
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE) {
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v5_0_stop(adev);
 		uvd_v5_0_stop(adev);
 		return 0;
 		return 0;

+ 5 - 2
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@@ -532,7 +532,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 	uvd_v6_0_mc_resume(adev);
 	uvd_v6_0_mc_resume(adev);
 
 
 	/* Set dynamic clock gating in S/W control mode */
 	/* Set dynamic clock gating in S/W control mode */
-	if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) {
+	if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
 		if (adev->flags & AMD_IS_APU)
 		if (adev->flags & AMD_IS_APU)
 			cz_set_uvd_clock_gating_branches(adev, false);
 			cz_set_uvd_clock_gating_branches(adev, false);
 		else
 		else
@@ -1000,7 +1000,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
 
-	if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG))
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
 		return 0;
 		return 0;
 
 
 	if (enable) {
 	if (enable) {
@@ -1030,6 +1030,9 @@ static int uvd_v6_0_set_powergating_state(void *handle,
 	 */
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE) {
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v6_0_stop(adev);
 		uvd_v6_0_stop(adev);
 		return 0;
 		return 0;

+ 4 - 1
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c

@@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
 {
 {
 	bool sw_cg = false;
 	bool sw_cg = false;
 
 
-	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) {
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
 		if (sw_cg)
 		if (sw_cg)
 			vce_v2_0_set_sw_cg(adev, true);
 			vce_v2_0_set_sw_cg(adev, true);
 		else
 		else
@@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle,
 	 */
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE)
 	if (state == AMD_PG_STATE_GATE)
 		/* XXX do we need a vce_v2_0_stop()? */
 		/* XXX do we need a vce_v2_0_stop()? */
 		return 0;
 		return 0;

+ 5 - 2
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c

@@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 		WREG32_P(mmVCE_STATUS, 0, ~1);
 		WREG32_P(mmVCE_STATUS, 0, ~1);
 
 
 		/* Set Clock-Gating off */
 		/* Set Clock-Gating off */
-		if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)
+		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
 			vce_v3_0_set_vce_sw_clock_gating(adev, false);
 			vce_v3_0_set_vce_sw_clock_gating(adev, false);
 
 
 		if (r) {
 		if (r) {
@@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 	int i;
 	int i;
 
 
-	if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG))
+	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
 		return 0;
 		return 0;
 
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle,
 	 */
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
+	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
+		return 0;
+
 	if (state == AMD_PG_STATE_GATE)
 	if (state == AMD_PG_STATE_GATE)
 		/* XXX do we need a vce_v3_0_stop()? */
 		/* XXX do we need a vce_v3_0_stop()? */
 		return 0;
 		return 0;

+ 1 - 2
drivers/gpu/drm/amd/amdgpu/vi.c

@@ -1457,8 +1457,7 @@ static int vi_common_early_init(void *handle)
 	case CHIP_STONEY:
 	case CHIP_STONEY:
 		adev->has_uvd = true;
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
 		adev->cg_flags = 0;
-		/* Disable UVD pg */
-		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
+		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		break;
 		break;
 	default:
 	default:

+ 32 - 0
drivers/gpu/drm/amd/include/amd_shared.h

@@ -85,6 +85,38 @@ enum amd_powergating_state {
 	AMD_PG_STATE_UNGATE,
 	AMD_PG_STATE_UNGATE,
 };
 };
 
 
+/* CG flags */
+#define AMD_CG_SUPPORT_GFX_MGCG			(1 << 0)
+#define AMD_CG_SUPPORT_GFX_MGLS			(1 << 1)
+#define AMD_CG_SUPPORT_GFX_CGCG			(1 << 2)
+#define AMD_CG_SUPPORT_GFX_CGLS			(1 << 3)
+#define AMD_CG_SUPPORT_GFX_CGTS			(1 << 4)
+#define AMD_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
+#define AMD_CG_SUPPORT_GFX_CP_LS		(1 << 6)
+#define AMD_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
+#define AMD_CG_SUPPORT_MC_LS			(1 << 8)
+#define AMD_CG_SUPPORT_MC_MGCG			(1 << 9)
+#define AMD_CG_SUPPORT_SDMA_LS			(1 << 10)
+#define AMD_CG_SUPPORT_SDMA_MGCG		(1 << 11)
+#define AMD_CG_SUPPORT_BIF_LS			(1 << 12)
+#define AMD_CG_SUPPORT_UVD_MGCG			(1 << 13)
+#define AMD_CG_SUPPORT_VCE_MGCG			(1 << 14)
+#define AMD_CG_SUPPORT_HDP_LS			(1 << 15)
+#define AMD_CG_SUPPORT_HDP_MGCG			(1 << 16)
+
+/* PG flags */
+#define AMD_PG_SUPPORT_GFX_PG			(1 << 0)
+#define AMD_PG_SUPPORT_GFX_SMG			(1 << 1)
+#define AMD_PG_SUPPORT_GFX_DMG			(1 << 2)
+#define AMD_PG_SUPPORT_UVD			(1 << 3)
+#define AMD_PG_SUPPORT_VCE			(1 << 4)
+#define AMD_PG_SUPPORT_CP			(1 << 5)
+#define AMD_PG_SUPPORT_GDS			(1 << 6)
+#define AMD_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
+#define AMD_PG_SUPPORT_SDMA			(1 << 8)
+#define AMD_PG_SUPPORT_ACP			(1 << 9)
+#define AMD_PG_SUPPORT_SAMU			(1 << 10)
+
 enum amd_pm_state_type {
 enum amd_pm_state_type {
 	/* not used for dpm */
 	/* not used for dpm */
 	POWER_STATE_TYPE_DEFAULT,
 	POWER_STATE_TYPE_DEFAULT,

+ 2 - 0
drivers/gpu/drm/amd/include/cgs_common.h

@@ -109,6 +109,8 @@ enum cgs_system_info_id {
 	CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
 	CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
 	CGS_SYSTEM_INFO_PCIE_GEN_INFO,
 	CGS_SYSTEM_INFO_PCIE_GEN_INFO,
 	CGS_SYSTEM_INFO_PCIE_MLW,
 	CGS_SYSTEM_INFO_PCIE_MLW,
+	CGS_SYSTEM_INFO_CG_FLAGS,
+	CGS_SYSTEM_INFO_PG_FLAGS,
 	CGS_SYSTEM_INFO_ID_MAXIMUM,
 	CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 };
 
 

+ 18 - 0
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c

@@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 	uint32_t i;
 	uint32_t i;
+	struct cgs_system_info sys_info = {0};
+	int result;
 
 
 	cz_hwmgr->gfx_ramp_step = 256*25/100;
 	cz_hwmgr->gfx_ramp_step = 256*25/100;
 
 
@@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 				   PHM_PlatformCaps_DisableVoltageIsland);
 				   PHM_PlatformCaps_DisableVoltageIsland);
 
 
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_UVDPowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_VCEPowerGating);
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+	if (!result) {
+		if (sys_info.value & AMD_PG_SUPPORT_UVD)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_UVDPowerGating);
+		if (sys_info.value & AMD_PG_SUPPORT_VCE)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_VCEPowerGating);
+	}
+
 	return 0;
 	return 0;
 }
 }
 
 

+ 17 - 2
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c

@@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 	pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
 	pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
 	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
 	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
 	phw_tonga_ulv_parm *ulv;
 	phw_tonga_ulv_parm *ulv;
+	struct cgs_system_info sys_info = {0};
 
 
 	PP_ASSERT_WITH_CODE((NULL != hwmgr),
 	PP_ASSERT_WITH_CODE((NULL != hwmgr),
 		"Invalid Parameter!", return -1;);
 		"Invalid Parameter!", return -1;);
@@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
 
 	data->vddc_phase_shed_control = 0;
 	data->vddc_phase_shed_control = 0;
 
 
-	if (0 == result) {
-		struct cgs_system_info sys_info = {0};
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_UVDPowerGating);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+		      PHM_PlatformCaps_VCEPowerGating);
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+	result = cgs_query_system_info(hwmgr->device, &sys_info);
+	if (!result) {
+		if (sys_info.value & AMD_PG_SUPPORT_UVD)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_UVDPowerGating);
+		if (sys_info.value & AMD_PG_SUPPORT_VCE)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				      PHM_PlatformCaps_VCEPowerGating);
+	}
 
 
+	if (0 == result) {
 		data->is_tlu_enabled = 0;
 		data->is_tlu_enabled = 0;
 		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
 		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
 			TONGA_MAX_HARDWARE_POWERLEVELS;
 			TONGA_MAX_HARDWARE_POWERLEVELS;

+ 5 - 0
drivers/gpu/drm/radeon/radeon_sa.c

@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 			/* see if we can skip over some allocations */
 			/* see if we can skip over some allocations */
 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
 
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_fence_ref(fences[i]);
+
 		spin_unlock(&sa_manager->wq.lock);
 		spin_unlock(&sa_manager->wq.lock);
 		r = radeon_fence_wait_any(rdev, fences, false);
 		r = radeon_fence_wait_any(rdev, fences, false);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i)
+			radeon_fence_unref(&fences[i]);
 		spin_lock(&sa_manager->wq.lock);
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
 		/* if we have nothing to wait for block */
 		if (r == -ENOENT) {
 		if (r == -ENOENT) {

+ 2 - 5
drivers/infiniband/core/sysfs.c

@@ -336,7 +336,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
 	union ib_gid gid;
 	union ib_gid gid;
 	struct ib_gid_attr gid_attr = {};
 	struct ib_gid_attr gid_attr = {};
 	ssize_t ret;
 	ssize_t ret;
-	va_list args;
 
 
 	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
 	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
 			   &gid_attr);
 			   &gid_attr);
@@ -348,7 +347,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
 err:
 err:
 	if (gid_attr.ndev)
 	if (gid_attr.ndev)
 		dev_put(gid_attr.ndev);
 		dev_put(gid_attr.ndev);
-	va_end(args);
 	return ret;
 	return ret;
 }
 }
 
 
@@ -722,12 +720,11 @@ static struct attribute_group *get_counter_table(struct ib_device *dev,
 
 
 	if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
 	if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
 				&cpi, 40, sizeof(cpi)) >= 0) {
 				&cpi, 40, sizeof(cpi)) >= 0) {
-
-		if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH)
+		if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
 			/* We have extended counters */
 			/* We have extended counters */
 			return &pma_group_ext;
 			return &pma_group_ext;
 
 
-		if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
+		if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
 			/* But not the IETF ones */
 			/* But not the IETF ones */
 			return &pma_group_noietf;
 			return &pma_group_noietf;
 	}
 	}

Энэ ялгаанд хэт олон файл өөрчлөгдсөн тул зарим файлыг харуулаагүй болно