
Merge branch 'linus' into sched/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar, 8 years ago (commit 0acbc7aa47)
100 changed files with 704 additions and 344 deletions
  1. + 2 - 2    Documentation/ABI/testing/sysfs-devices-system-ibm-rtl
  2. + 5 - 0    Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
  3. + 8 - 3    Documentation/devicetree/bindings/pci/rockchip-pcie.txt
  4. + 5 - 5    Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
  5. + 0 - 1    Documentation/filesystems/Locking
  6. + 0 - 1    Documentation/filesystems/vfs.txt
  7. + 2 - 1    Documentation/networking/dsa/dsa.txt
  8. + 2 - 1    MAINTAINERS
  9. + 7 - 5    Makefile
 10. + 6 - 1    arch/arc/Makefile
 11. + 1 - 1    arch/arc/boot/dts/axc001.dtsi
 12. + 1 - 1    arch/arc/boot/dts/nsim_700.dts
 13. + 4 - 0    arch/arc/boot/dts/nsimosci.dts
 14. + 1 - 0    arch/arc/configs/nsim_700_defconfig
 15. + 1 - 0    arch/arc/configs/nsim_hs_defconfig
 16. + 1 - 0    arch/arc/configs/nsim_hs_smp_defconfig
 17. + 1 - 0    arch/arc/configs/nsimosci_defconfig
 18. + 1 - 0    arch/arc/configs/nsimosci_hs_defconfig
 19. + 1 - 2    arch/arc/configs/nsimosci_hs_smp_defconfig
 20. + 2 - 0    arch/arc/include/asm/arcregs.h
 21. + 2 - 2    arch/arc/include/asm/smp.h
 22. + 2 - 0    arch/arc/kernel/devtree.c
 23. + 20 - 12  arch/arc/kernel/mcip.c
 24. + 11 - 9   arch/arc/kernel/process.c
 25. + 15 - 8   arch/arc/kernel/smp.c
 26. + 11 - 8   arch/arc/kernel/time.c
 27. + 26 - 0   arch/arc/mm/dma.c
 28. + 0 - 6    arch/arc/plat-eznps/smp.c
 29. + 1 - 0    arch/arm/include/asm/kvm_asm.h
 30. + 3 - 0    arch/arm/include/asm/kvm_host.h
 31. + 1 - 0    arch/arm/include/asm/kvm_hyp.h
 32. + 26 - 1   arch/arm/kvm/arm.c
 33. + 15 - 0   arch/arm/kvm/hyp/tlb.c
 34. + 5 - 2    arch/arm64/boot/dts/rockchip/rk3399.dtsi
 35. + 1 - 0    arch/arm64/include/asm/kvm_asm.h
 36. + 3 - 0    arch/arm64/include/asm/kvm_host.h
 37. + 1 - 1    arch/arm64/include/asm/kvm_mmu.h
 38. + 15 - 0   arch/arm64/kvm/hyp/tlb.c
 39. + 1 - 0    arch/nios2/kernel/time.c
 40. + 2 - 0    arch/s390/kernel/vmlinux.lds.S
 41. + 1 - 1    arch/s390/pci/pci_dma.c
 42. + 3 - 0    arch/tile/include/asm/cache.h
 43. + 2 - 2    arch/x86/crypto/aesni-intel_glue.c
 44. + 28 - 4   arch/x86/events/intel/uncore_snb.c
 45. + 1 - 0    arch/x86/include/asm/intel-mid.h
 46. + 4 - 1    arch/x86/kernel/apm_32.c
 47. + 1 - 5    arch/x86/kernel/cpu/amd.c
 48. + 30 - 2   arch/x86/kernel/cpu/common.c
 49. + 1 - 1    arch/x86/platform/efi/efi.c
 50. + 57 - 23  arch/x86/platform/efi/efi_64.c
 51. + 19 - 0   arch/x86/platform/intel-mid/pwr.c
 52. + 2 - 8    drivers/acpi/acpi_apd.c
 53. + 2 - 8    drivers/acpi/acpi_lpss.c
 54. + 4 - 1    drivers/acpi/acpi_platform.c
 55. + 2 - 2    drivers/acpi/dptf/int340x_thermal.c
 56. + 1 - 1    drivers/acpi/scan.c
 57. + 3 - 2    drivers/base/dd.c
 58. + 4 - 4    drivers/base/power/main.c
 59. + 0 - 41   drivers/block/aoe/aoecmd.c
 60. + 1 - 1    drivers/block/drbd/drbd_main.c
 61. + 1 - 1    drivers/block/nbd.c
 62. + 0 - 3    drivers/char/ppdev.c
 63. + 8 - 5    drivers/clk/clk-qoriq.c
 64. + 4 - 6    drivers/clk/clk-xgene.c
 65. + 6 - 2    drivers/clk/imx/clk-pllv3.c
 66. + 1 - 1    drivers/clk/mmp/clk-of-mmp2.c
 67. + 1 - 1    drivers/clk/mmp/clk-of-pxa168.c
 68. + 2 - 2    drivers/clk/mmp/clk-of-pxa910.c
 69. + 1 - 4    drivers/clk/rockchip/clk-ddr.c
 70. + 14 - 8   drivers/clk/samsung/clk-exynos-clkout.c
 71. + 4 - 1    drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
 72. + 11 - 2   drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
 73. + 1 - 1    drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
 74. + 24 - 2   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
 75. + 2 - 0    drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
 76. + 2 - 0    drivers/gpu/drm/amd/amdgpu/vi.c
 77. + 1 - 1    drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
 78. + 4 - 2    drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
 79. + 45 - 25  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
 80. + 3 - 3    drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
 81. + 0 - 13   drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
 82. + 3 - 3    drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
 83. + 19 - 0   drivers/gpu/drm/amd/scheduler/sched_fence.c
 84. + 17 - 3   drivers/gpu/drm/i915/i915_gem.c
 85. + 26 - 3   drivers/gpu/drm/i915/intel_display.c
 86. + 48 - 36  drivers/gpu/drm/i915/intel_hdmi.c
 87. + 3 - 1    drivers/gpu/drm/i915/intel_runtime_pm.c
 88. + 6 - 3    drivers/gpu/drm/imx/ipuv3-crtc.c
 89. + 12 - 2   drivers/gpu/drm/msm/dsi/dsi_host.c
 90. + 1 - 0    drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
 91. + 1 - 0    drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
 92. + 1 - 0    drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
 93. + 1 - 0    drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
 94. + 2 - 2    drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
 95. + 28 - 18  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
 96. + 3 - 6    drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
 97. + 1 - 1    drivers/gpu/drm/msm/msm_drv.c
 98. + 5 - 2    drivers/gpu/drm/msm/msm_gem_shrinker.c
 99. + 1 - 1    drivers/gpu/drm/radeon/radeon_connectors.c
100. + 13 - 0   drivers/gpu/drm/radeon/radeon_device.c

+ 2 - 2
Documentation/ABI/testing/sysfs-devices-system-ibm-rtl

@@ -1,4 +1,4 @@
-What:           state
+What:           /sys/devices/system/ibm_rtl/state
 Date:           Sep 2010
 KernelVersion:  2.6.37
 Contact:        Vernon Mauery <vernux@us.ibm.com>
@@ -10,7 +10,7 @@ Description:    The state file allows a means by which to change in and
 Users:          The ibm-prtm userspace daemon uses this interface.
 
 
-What:           version
+What:           /sys/devices/system/ibm_rtl/version
 Date:           Sep 2010
 KernelVersion:  2.6.37
 Contact:        Vernon Mauery <vernux@us.ibm.com>

+ 5 - 0
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt

@@ -43,6 +43,9 @@ Optional properties:
   reset signal present internally in some host controller IC designs.
   See Documentation/devicetree/bindings/reset/reset.txt for details.
 
+* reset-names: request name for using "resets" property. Must be "reset".
+	(It will be used together with "resets" property.)
+
 * clocks: from common clock binding: handle to biu and ciu clocks for the
   bus interface unit clock and the card interface unit clock.
 
@@ -103,6 +106,8 @@ board specific portions as listed below.
 		interrupts = <0 75 0>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+		resets = <&rst 20>;
+		reset-names = "reset";
 	};
 
 [board specific internal DMA resources]

+ 8 - 3
Documentation/devicetree/bindings/pci/rockchip-pcie.txt

@@ -26,13 +26,16 @@ Required properties:
 	- "sys"
 	- "legacy"
 	- "client"
-- resets: Must contain five entries for each entry in reset-names.
+- resets: Must contain seven entries for each entry in reset-names.
 	   See ../reset/reset.txt for details.
 - reset-names: Must include the following names
 	- "core"
 	- "mgmt"
 	- "mgmt-sticky"
 	- "pipe"
+	- "pm"
+	- "aclk"
+	- "pclk"
 - pinctrl-names : The pin control state names
 - pinctrl-0: The "default" pinctrl state
 - #interrupt-cells: specifies the number of cells needed to encode an
@@ -86,8 +89,10 @@ pcie0: pcie@f8000000 {
 	reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
 	reg-names = "axi-base", "apb-base";
 	resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-	reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> ,
+		 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+	reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+		      "pm", "pclk", "aclk";
 	phys = <&pcie_phy>;
 	phy-names = "pcie-phy";
 	pinctrl-names = "default";

+ 5 - 5
Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt

@@ -14,11 +14,6 @@ Required properies:
 - #size-cells	: The value of this property must be 1
 - ranges	: defines mapping between pin controller node (parent) to
   gpio-bank node (children).
- - interrupt-parent: phandle of the interrupt parent to which the external
-   GPIO interrupts are forwarded to.
- - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
-   which includes IRQ mux selection register, and the offset of the IRQ mux
-   selection register.
 - pins-are-numbered: Specify the subnodes are using numbered pinmux to
   specify pins.
 
@@ -37,6 +32,11 @@ Required properties:
 
 Optional properties:
 - reset:	  : Reference to the reset controller
+ - interrupt-parent: phandle of the interrupt parent to which the external
+   GPIO interrupts are forwarded to.
+ - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
+   which includes IRQ mux selection register, and the offset of the IRQ mux
+   selection register.
 
 Example:
 #include <dt-bindings/pinctrl/stm32f429-pinfunc.h>

+ 0 - 1
Documentation/filesystems/Locking

@@ -447,7 +447,6 @@ prototypes:
 	int (*flush) (struct file *);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,

+ 0 - 1
Documentation/filesystems/vfs.txt

@@ -828,7 +828,6 @@ struct file_operations {
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t, loff_t, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);

+ 2 - 1
Documentation/networking/dsa/dsa.txt

@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and
 Switch tagging protocols
 ------------------------
 
-DSA currently supports 4 different tagging protocols, and a tag-less mode as
+DSA currently supports 5 different tagging protocols, and a tag-less mode as
 well. The different protocols are implemented in:
 
 net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
 net/dsa/tag_dsa.c: Marvell's original DSA tag
 net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
 net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
+net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
 
 The exact format of the tag protocol is vendor specific, but in general, they
 all contain something which:

+ 2 - 1
MAINTAINERS

@@ -8057,6 +8057,7 @@ F:	drivers/infiniband/hw/mlx4/
 F:	include/linux/mlx4/
 
 MELLANOX MLX5 core VPI driver
+M:	Saeed Mahameed <saeedm@mellanox.com>
 M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	netdev@vger.kernel.org
@@ -9335,7 +9336,7 @@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
 M:	Keith Busch <keith.busch@intel.com>
 L:	linux-pci@vger.kernel.org
 S:	Supported
-F:	arch/x86/pci/vmd.c
+F:	drivers/pci/host/vmd.c
 
 PCIE DRIVER FOR ST SPEAR13XX
 M:	Pratyush Anand <pratyush.anand@gmail.com>

+ 7 - 5
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -370,7 +370,7 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 LDFLAGS_vmlinux =
-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
 CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
 
 
@@ -620,7 +620,6 @@ ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
-KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning,frame-address,)
 
 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
@@ -629,15 +628,18 @@ KBUILD_CFLAGS	+= $(call cc-option,-fdata-sections,)
 endif
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= -Os
+KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS	+= -O2
+KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,)
 else
 KBUILD_CFLAGS   += -O2
 endif
 endif
 
+KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
+			$(call cc-disable-warning,maybe-uninitialized,))
+
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
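
The cc-ifversion guard above targets gcc older than 4.9, the versions that
emit spurious maybe-uninitialized warnings at -Os/-O2. A minimal standalone
C snippet of the pattern that trips the false positive (illustrative only;
exact behavior varies by gcc version and optimization level):

#include <stdio.h>

static int get_value(int ok, int *out)
{
	if (!ok)
		return -1;
	*out = 42;
	return 0;
}

int main(void)
{
	int v;	/* older gcc may warn: 'v' may be used uninitialized */

	if (get_value(1, &v) == 0)
		printf("%d\n", v);	/* only reached after *out was written */
	return 0;
}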
 
 

+ 6 - 1
arch/arc/Makefile

@@ -50,6 +50,9 @@ atleast_gcc44 :=  $(call cc-ifversion, -ge, 0404, y)
 
 cflags-$(atleast_gcc44)			+= -fsection-anchors
 
+cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
+
 ifdef CONFIG_ISA_ARCV2
 
 ifndef CONFIG_ARC_HAS_LL64
@@ -68,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables $(cfi)
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
 # Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
 endif
 
 # small data is default for elf32 tool-chain. If not usable, disable it

+ 1 - 1
arch/arc/boot/dts/axc001.dtsi

@@ -71,7 +71,7 @@
 			reg-io-width = <4>;
 		};
 
-		arcpmu0: pmu {
+		arcpct0: pct {
 			compatible = "snps,arc700-pct";
 		};
 	};

+ 1 - 1
arch/arc/boot/dts/nsim_700.dts

@@ -69,7 +69,7 @@
 			};
 		};
 
-		arcpmu0: pmu {
+		arcpct0: pct {
 			compatible = "snps,arc700-pct";
 		};
 	};

+ 4 - 0
arch/arc/boot/dts/nsimosci.dts

@@ -83,5 +83,9 @@
 			reg = <0xf0003000 0x44>;
 			interrupts = <7>;
 		};
+
+		arcpct0: pct {
+			compatible = "snps,arc700-pct";
+		};
 	};
 };

+ 1 - 0
arch/arc/configs/nsim_700_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsim_hs_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsim_hs_smp_defconfig

@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsimosci_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsimosci_hs_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y

+ 1 - 2
arch/arc/configs/nsimosci_hs_smp_defconfig

@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
@@ -34,7 +35,6 @@ CONFIG_INET=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HWMON is not set
 CONFIG_DRM=y
 CONFIG_DRM_ARCPGU=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set

+ 2 - 0
arch/arc/include/asm/arcregs.h

@@ -43,12 +43,14 @@
 #define STATUS_AE_BIT		5	/* Exception active */
 #define STATUS_DE_BIT		6	/* PC is in delay slot */
 #define STATUS_U_BIT		7	/* User/Kernel mode */
+#define STATUS_Z_BIT            11
 #define STATUS_L_BIT		12	/* Loop inhibit */
 
 /* These masks correspond to the status word(STATUS_32) bits */
 #define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
 #define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
 #define STATUS_U_MASK		(1<<STATUS_U_BIT)
+#define STATUS_Z_MASK		(1<<STATUS_Z_BIT)
 #define STATUS_L_MASK		(1<<STATUS_L_BIT)
 
 /*

+ 2 - 2
arch/arc/include/asm/smp.h

@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
  * API expected BY platform smp code (FROM arch smp code)
  *
  * smp_ipi_irq_setup:
- *	Takes @cpu and @irq to which the arch-common ISR is hooked up
+ *	Takes @cpu and @hwirq to which the arch-common ISR is hooked up
  */
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
 
 /*
  * struct plat_smp_ops	- SMP callbacks provided by platform to ARC SMP

+ 2 - 0
arch/arc/kernel/devtree.c

@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
 		arc_base_baud = 166666666;	/* Fixed 166.6MHz clk (TB10x) */
 	else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
 		arc_base_baud = 33333333;	/* Fixed 33MHz clk (AXS10x) */
+	else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
+		arc_base_baud = 800000000;      /* Fixed 800MHz clk (NPS) */
 	else
 		arc_base_baud = 50000000;	/* Fixed default 50MHz */
 }

+ 20 - 12
arch/arc/kernel/mcip.c

@@ -181,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 {
 	unsigned long flags;
 	cpumask_t online;
+	unsigned int destination_bits;
+	unsigned int distribution_mode;
 
 	/* errout if no online cpu per @cpumask */
 	if (!cpumask_and(&online, cpumask, cpu_online_mask))
@@ -188,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 	raw_spin_lock_irqsave(&mcip_lock, flags);
 
-	idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
-	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+	destination_bits = cpumask_bits(&online)[0];
+	idu_set_dest(data->hwirq, destination_bits);
+
+	if (ffs(destination_bits) == fls(destination_bits))
+		distribution_mode = IDU_M_DISTRI_DEST;
+	else
+		distribution_mode = IDU_M_DISTRI_RR;
+
+	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
 
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 
@@ -207,16 +216,15 @@ static struct irq_chip idu_irq_chip = {
 
 };
 
-static int idu_first_irq;
+static irq_hw_number_t idu_first_hwirq;
 
 static void idu_cascade_isr(struct irq_desc *desc)
 {
-	struct irq_domain *domain = irq_desc_get_handler_data(desc);
-	unsigned int core_irq = irq_desc_get_irq(desc);
-	unsigned int idu_irq;
+	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
+	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
-	idu_irq = core_irq - idu_first_irq;
-	generic_handle_irq(irq_find_mapping(domain, idu_irq));
+	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -282,7 +290,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 	struct irq_domain *domain;
 	/* Read IDU BCR to confirm nr_irqs */
 	int nr_irqs = of_irq_count(intc);
-	int i, irq;
+	int i, virq;
 	struct mcip_bcr mp;
 
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -303,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 		 * however we need it to get the parent virq and set IDU handler
 		 * as first level isr
 		 */
-		irq = irq_of_parse_and_map(intc, i);
+		virq = irq_of_parse_and_map(intc, i);
 		if (!i)
-			idu_first_irq = irq;
+			idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
 
-		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
 	}
 
 	__mcip_cmd(CMD_IDU_ENABLE, 0);
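
The new affinity path picks direct-destination mode when exactly one
destination bit survives the online-CPU mask and falls back to round-robin
otherwise. The ffs() == fls() comparison is a single-bit test: the lowest
and highest set bit coincide only when one bit is set. A standalone C sketch
of an equivalent check (illustrative helper, not kernel code):

#include <stdbool.h>

/* True iff exactly one bit of x is set; for nonzero x this is
 * equivalent to the kernel's ffs(x) == fls(x) test above. */
static bool single_target_cpu(unsigned int x)
{
	return x != 0 && (x & (x - 1)) == 0;
}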

+ 11 - 9
arch/arc/kernel/process.c

@@ -43,8 +43,8 @@ SYSCALL_DEFINE0(arc_gettls)
 
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
-	int uval;
-	int ret;
+	struct pt_regs *regs = current_pt_regs();
+	int uval = -EFAULT;
 
 	/*
 	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -54,24 +54,26 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
 
+	/* Z indicates to userspace if operation succeded */
+	regs->status32 &= ~STATUS_Z_MASK;
+
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
 	preempt_disable();
 
-	ret = __get_user(uval, uaddr);
-	if (ret)
+	if (__get_user(uval, uaddr))
 		goto done;
 
-	if (uval != expected)
-		ret = -EAGAIN;
-	else
-		ret = __put_user(new, uaddr);
+	if (uval == expected) {
+		if (!__put_user(new, uaddr))
+			regs->status32 |= STATUS_Z_MASK;
+	}
 
 done:
 	preempt_enable();
 
-	return ret;
+	return uval;
 }
 
 void arch_cpu_idle(void)
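
With this change the syscall reports success through the Z flag in STATUS32
and always returns the old value, so userspace can no longer key off an
-EAGAIN return. A rough sketch of the calling side (hypothetical wrapper;
reading the Z flag requires ARC inline asm right after the trap, noted here
only in a comment):

#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical userspace wrapper. Returns the old value at *uaddr; on
 * success the kernel also sets STATUS32.Z, which real callers must test
 * with inline asm -- the return value alone cannot distinguish "stored
 * new" from "read back expected but the store faulted".
 * __NR_arc_usr_cmpxchg is assumed to come from the ARC uapi headers. */
static long arc_usr_cmpxchg(int *uaddr, int expected, int new_val)
{
	return syscall(__NR_arc_usr_cmpxchg, uaddr, expected, new_val);
}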

+ 15 - 8
arch/arc/kernel/smp.c

@@ -22,6 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
 #include <linux/reboot.h>
+#include <linux/irqdomain.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/mach_desc.h>
@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	int i;
 
 	/*
-	 * Initialise the present map, which describes the set of CPUs
-	 * actually populated at the present time.
+	 * if platform didn't set the present map already, do it now
+	 * boot cpu is set to present already by init/main.c
 	 */
-	for (i = 0; i < max_cpus; i++)
-		set_cpu_present(i, true);
+	if (num_present_cpus() <= 1) {
+		for (i = 0; i < max_cpus; i++)
+			set_cpu_present(i, true);
+	}
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  */
 static DEFINE_PER_CPU(int, ipi_dev);
 
-int smp_ipi_irq_setup(int cpu, int irq)
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
 {
 	int *dev = per_cpu_ptr(&ipi_dev, cpu);
+	unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+	if (!virq)
+		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
 
 	/* Boot cpu calls request, all call enable */
 	if (!cpu) {
 		int rc;
 
-		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
 		if (rc)
-			panic("Percpu IRQ request failed for %d\n", irq);
+			panic("Percpu IRQ request failed for %u\n", virq);
 	}
 
-	enable_percpu_irq(irq, 0);
+	enable_percpu_irq(virq, 0);
 
 	return 0;
 }

+ 11 - 8
arch/arc/kernel/time.c

@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
 		cycle_t  full;
 	} stamp;
 
-
-	__asm__ __volatile(
-	"1:						\n"
-	"	lr		%0, [AUX_RTC_LOW]	\n"
-	"	lr		%1, [AUX_RTC_HIGH]	\n"
-	"	lr		%2, [AUX_RTC_CTRL]	\n"
-	"	bbit0.nt	%2, 31, 1b		\n"
-	: "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+	/*
+	 * hardware has an internal state machine which tracks readout of
+	 * low/high and updates the CTRL.status if
+	 *  - interrupt/exception taken between the two reads
+	 *  - high increments after low has been read
+	 */
+	do {
+		stamp.low = read_aux_reg(AUX_RTC_LOW);
+		stamp.high = read_aux_reg(AUX_RTC_HIGH);
+		status = read_aux_reg(AUX_RTC_CTRL);
+	} while (!(status & _BITUL(31)));
 
 	return stamp.full;
 }
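
The same retry idiom works for any free-running counter that exposes a
64-bit count as two 32-bit halves plus an "atomic pair" status bit. A
generic standalone sketch (the register accessor and bit layout are
illustrative, not the ARC RTC API):

#include <stdint.h>

extern uint32_t mmio_read(unsigned int reg);	/* assumed MMIO accessor */

enum { REG_LOW, REG_HIGH, REG_CTRL };
#define CTRL_ATOMIC (1u << 31)	/* set when low/high were read as a pair */

static uint64_t read_counter64(void)
{
	uint32_t lo, hi, status;

	do {
		lo = mmio_read(REG_LOW);
		hi = mmio_read(REG_HIGH);
		status = mmio_read(REG_CTRL);
	} while (!(status & CTRL_ATOMIC));	/* retry if an IRQ or carry split the pair */

	return ((uint64_t)hi << 32) | lo;
}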

+ 26 - 0
arch/arc/mm/dma.c

@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }
 
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			unsigned long attrs)
+{
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+	unsigned long off = vma->vm_pgoff;
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 /*
  * streaming DMA Mapping API...
  * CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
 struct dma_map_ops arc_dma_ops = {
 	.alloc			= arc_dma_alloc,
 	.free			= arc_dma_free,
+	.mmap			= arc_dma_mmap,
 	.map_page		= arc_dma_map_page,
 	.map_sg			= arc_dma_map_sg,
 	.sync_single_for_device	= arc_dma_sync_single_for_device,
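
With .mmap wired into arc_dma_ops, a driver can hand coherent buffers to
userspace through the generic DMA API; dma_mmap_coherent() routes to
arc_dma_mmap() via the device's dma_map_ops. A hedged sketch of the calling
side (driver names and fields are illustrative, assuming a buffer that was
allocated earlier with dma_alloc_coherent()):

#include <linux/dma-mapping.h>
#include <linux/fs.h>

struct mydrv {				/* illustrative driver state */
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv *p = file->private_data;

	return dma_mmap_coherent(p->dev, vma, p->cpu_addr,
				 p->dma_handle, p->size);
}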

+ 0 - 6
arch/arc/plat-eznps/smp.c

@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
 	mtm_enable_core(cpu);
 }
 
-static void eznps_ipi_clear(int irq)
-{
-	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
-}
-
 struct plat_smp_ops plat_smp_ops = {
 	.info		= smp_cpuinfo_buf,
 	.init_early_smp	= eznps_init_cpumasks,
 	.cpu_kick	= eznps_smp_wakeup_cpu,
 	.ipi_send	= eznps_ipi_send,
 	.init_per_cpu	= eznps_init_per_cpu,
-	.ipi_clear	= eznps_ipi_clear,
 };

+ 1 - 0
arch/arm/include/asm/kvm_asm.h

@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 

+ 3 - 0
arch/arm/include/asm/kvm_host.h

@@ -57,6 +57,9 @@ struct kvm_arch {
 	/* VTTBR value associated with below pgd and vmid */
 	u64    vttbr;
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* Timer */
 	struct arch_timer_kvm	timer;
 

+ 1 - 0
arch/arm/include/asm/kvm_hyp.h

@@ -71,6 +71,7 @@
 #define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
 #define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
 #define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL		__ACCESS_CP15(c8, 0, c7, 0)
 #define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
 #define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
 #define NMRR		__ACCESS_CP15(c10, 0, c2, 1)

+ 26 - 1
arch/arm/kvm/arm.c

@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int ret = 0;
+	int ret, cpu;
 
 	if (type)
 		return -EINVAL;
 
+	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+	if (!kvm->arch.last_vcpu_ran)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
 	ret = kvm_alloc_stage2_pgd(kvm);
 	if (ret)
 		goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(kvm);
 out_fail_alloc:
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
 	return ret;
 }
 
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int *last_ran;
+
+	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+	/*
+	 * We might get preempted before the vCPU actually runs, but
+	 * over-invalidation doesn't affect correctness.
+	 */
+	if (*last_ran != vcpu->vcpu_id) {
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		*last_ran = vcpu->vcpu_id;
+	}
+
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 

+ 15 - 0
arch/arm/kvm/hyp/tlb.c

@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	__kvm_tlb_flush_vmid(kvm);
 }
 
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, VTTBR);
+	isb();
+
+	write_sysreg(0, TLBIALL);
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, VTTBR);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	write_sysreg(0, TLBIALLNSNHIS);
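
The last_vcpu_ran bookkeeping added in arm.c, paired with the local flush
above, is an instance of a general "invalidate on owner change" pattern:
remember which owner last used a CPU-local resource and flush only when a
different owner shows up. A standalone C sketch of its shape (names
illustrative, not KVM API):

#include <stddef.h>

#define NR_CPUS 4

static int last_owner[NR_CPUS];	/* -1 = "no owner yet", see init below */

static void flush_local_state(int cpu)
{
	/* stand-in for the per-CPU TLB invalidation */
}

static void init_last_owner(void)
{
	for (size_t i = 0; i < NR_CPUS; i++)
		last_owner[i] = -1;
}

static void before_run(int cpu, int owner_id)
{
	if (last_owner[cpu] != owner_id) {
		flush_local_state(cpu);	/* over-invalidation is harmless */
		last_owner[cpu] = owner_id;
	}
}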

+ 5 - 2
arch/arm64/boot/dts/rockchip/rk3399.dtsi

@@ -300,8 +300,11 @@
 		ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
 			  0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
 		resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-		reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+			 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+			 <&cru SRST_A_PCIE>;
+		reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+			      "pm", "pclk", "aclk";
 		status = "disabled";
 
 		pcie0_intc: interrupt-controller {

+ 1 - 0
arch/arm64/include/asm/kvm_asm.h

@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 

+ 3 - 0
arch/arm64/include/asm/kvm_host.h

@@ -62,6 +62,9 @@ struct kvm_arch {
 	/* VTTBR value associated with above pgd and vmid */
 	u64    vttbr;
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* The maximum number of vCPUs depends on the used GIC model */
 	int max_vcpus;
 

+ 1 - 1
arch/arm64/include/asm/kvm_mmu.h

@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 	return v;
 }
 
-#define kern_hyp_va(v) 	(typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
 /*
  * We currently only support a 40bit IPA.
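
The added outer parentheses matter because postfix operators bind more
tightly than a cast: with the old macro, an expression like
kern_hyp_va(vcpu)->kvm (as used in the new tlb.c code) would try to apply
-> before the cast and fail to compile. A tiny standalone illustration of
the hazard (generic names, not the kernel macro):

#include <stdio.h>

struct box { int val; };

#define AS_BOX_BAD(p)  (struct box *)(p)
#define AS_BOX_GOOD(p) ((struct box *)(p))

int main(void)
{
	struct box b = { 7 };
	void *vp = &b;

	/* AS_BOX_BAD(vp)->val parses as (struct box *)(vp->val), a
	 * compile error on a void pointer; the sealed GOOD form works. */
	printf("%d\n", AS_BOX_GOOD(vp)->val);
	return 0;
}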

+ 15 - 0
arch/arm64/kvm/hyp/tlb.c

@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	write_sysreg(0, vttbr_el2);
 }
 
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+
+	asm volatile("tlbi vmalle1" : : );
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, vttbr_el2);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	dsb(ishst);

+ 1 - 0
arch/nios2/kernel/time.c

@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
 		ret = nios2_clocksource_init(timer);
 		break;
 	default:
+		ret = 0;
 		break;
 	}
 

+ 2 - 0
arch/s390/kernel/vmlinux.lds.S

@@ -62,9 +62,11 @@ SECTIONS
 
 	. = ALIGN(PAGE_SIZE);
 	__start_ro_after_init = .;
+	__start_data_ro_after_init = .;
 	.data..ro_after_init : {
 		 *(.data..ro_after_init)
 	}
+	__end_data_ro_after_init = .;
 	EXCEPTION_TABLE(16)
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;

+ 1 - 1
arch/s390/pci/pci_dma.c

@@ -423,7 +423,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t dma_addr_base, dma_addr;
 	int flags = ZPCI_PTE_VALID;
 	struct scatterlist *s;
-	unsigned long pa;
+	unsigned long pa = 0;
 	int ret;
 
 	size = PAGE_ALIGN(size);

+ 3 - 0
arch/tile/include/asm/cache.h

@@ -61,4 +61,7 @@
  */
 #define __write_once __read_mostly
 
+/* __ro_after_init is the generic name for the tile arch __write_once. */
+#define __ro_after_init __read_mostly
+
 #endif /* _ASM_TILE_CACHE_H */

+ 2 - 2
arch/x86/crypto/aesni-intel_glue.c

@@ -888,7 +888,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
 	struct scatter_walk src_sg_walk;
-	struct scatter_walk dst_sg_walk;
+	struct scatter_walk dst_sg_walk = {};
 	unsigned int i;
 
 	/* Assuming we are supporting rfc4106 64-bit extended */
@@ -968,7 +968,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
 	u8 authTag[16];
 	struct scatter_walk src_sg_walk;
-	struct scatter_walk dst_sg_walk;
+	struct scatter_walk dst_sg_walk = {};
 	unsigned int i;
 
 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))

+ 28 - 4
arch/x86/events/intel/uncore_snb.c

@@ -8,8 +8,12 @@
 #define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
 #define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
-#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
-#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x1904
+#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC	0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC	0x1900
+#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC	0x1910
+#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC	0x190f
+#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC	0x191f
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
@@ -616,13 +620,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
 
 static const struct pci_device_id skl_uncore_pci_ids[] = {
 	{ /* IMC */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
 	},
 	{ /* IMC */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
 	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
 
 	{ /* end: all zeroes */ },
 };
@@ -666,8 +686,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
 	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
 	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
 	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
-	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
+	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
 	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
+	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
+	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
+	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
+	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
 	{  /* end marker */ }
 };
 

+ 1 - 0
arch/x86/include/asm/intel-mid.h

@@ -17,6 +17,7 @@
 
 extern int intel_mid_pci_init(void);
 extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev);
 
 extern void intel_mid_pwr_power_off(void);
 

+ 4 - 1
arch/x86/kernel/apm_32.c

@@ -1042,8 +1042,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
 
 	if (apm_info.get_power_status_broken)
 		return APM_32_UNSUPPORTED;
-	if (apm_bios_call(&call))
+	if (apm_bios_call(&call)) {
+		if (!call.err)
+			return APM_NO_ERROR;
 		return call.err;
+	}
 	*status = call.ebx;
 	*bat = call.ecx;
 	if (apm_info.get_power_status_swabinminutes) {

+ 1 - 5
arch/x86/kernel/cpu/amd.c

@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	unsigned bits;
 	int cpu = smp_processor_id();
-	unsigned int socket_id, core_complex_id;
 
 	bits = c->x86_coreid_bits;
 	/* Low order bits define the core id (index of core in socket) */
@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 	 if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
 		return;
 
-	socket_id	= (c->apicid >> bits) - 1;
-	core_complex_id	= (c->apicid & ((1 << bits) - 1)) >> 3;
-
-	per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
+	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
 #endif
 }
 

+ 30 - 2
arch/x86/kernel/cpu/common.c

@@ -978,6 +978,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
 	}
 }
 
+/*
+ * The physical to logical package id mapping is initialized from the
+ * acpi/mptables information. Make sure that CPUID actually agrees with
+ * that.
+ */
+static void sanitize_package_id(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	unsigned int pkg, apicid, cpu = smp_processor_id();
+
+	apicid = apic->cpu_present_to_apicid(cpu);
+	pkg = apicid >> boot_cpu_data.x86_coreid_bits;
+
+	if (apicid != c->initial_apicid) {
+		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
+		       cpu, apicid, c->initial_apicid);
+		c->initial_apicid = apicid;
+	}
+	if (pkg != c->phys_proc_id) {
+		pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
+		       cpu, pkg, c->phys_proc_id);
+		c->phys_proc_id = pkg;
+	}
+	c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
+#else
+	c->logical_proc_id = 0;
+#endif
+}
+
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
-	/* The boot/hotplug time assigment got cleared, restore it */
-	c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
+	sanitize_package_id(c);
 }
 
 /*
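
A quick worked example of the package-id derivation in sanitize_package_id(),
as a standalone C program (illustrative values; x86_coreid_bits is the number
of APIC-id bits reserved for the core index):

#include <stdio.h>

int main(void)
{
	unsigned int coreid_bits = 3;	/* low 3 bits index the core */
	unsigned int apicid = 0x1a;	/* 0b11010 */
	unsigned int pkg = apicid >> coreid_bits;

	/* 0b11010 >> 3 == 0b11: this CPU sits in physical package 3 */
	printf("apicid 0x%x -> package %u\n", apicid, pkg);
	return 0;
}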

+ 1 - 1
arch/x86/platform/efi/efi.c

@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void)
 	int count = 0, pg_shift = 0;
 	void *new_memmap = NULL;
 	efi_status_t status;
-	phys_addr_t pa;
+	unsigned long pa;
 
 	efi.systab = NULL;
 

+ 57 - 23
arch/x86/platform/efi/efi_64.c

@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/io.h>
 #include <linux/reboot.h>
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
+#include <linux/ucs2_string.h>
 
 
 #include <asm/setup.h>
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/page.h>
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void)
 	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
 	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
 }
 }
 
 
+/*
+ * Wrapper for slow_virt_to_phys() that handles NULL addresses.
+ */
+static inline phys_addr_t
+virt_to_phys_or_null_size(void *va, unsigned long size)
+{
+	bool bad_size;
+
+	if (!va)
+		return 0;
+
+	if (virt_addr_valid(va))
+		return virt_to_phys(va);
+
+	/*
+	 * A fully aligned variable on the stack is guaranteed not to
+	 * cross a page bounary. Try to catch strings on the stack by
+	 * checking that 'size' is a power of two.
+	 */
+	bad_size = size > PAGE_SIZE || !is_power_of_2(size);
+
+	WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
+
+	return slow_virt_to_phys(va);
+}
+
+#define virt_to_phys_or_null(addr)				\
+	virt_to_phys_or_null_size((addr), sizeof(*(addr)))
+
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
 {
 	unsigned long pfn, text;
 	unsigned long pfn, text;
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 
 
 	spin_lock(&rtc_lock);
 	spin_lock(&rtc_lock);
 
 
-	phys_tm = virt_to_phys(tm);
-	phys_tc = virt_to_phys(tc);
+	phys_tm = virt_to_phys_or_null(tm);
+	phys_tc = virt_to_phys_or_null(tc);

 	status = efi_thunk(get_time, phys_tm, phys_tc);

@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)

 	spin_lock(&rtc_lock);

-	phys_tm = virt_to_phys(tm);
+	phys_tm = virt_to_phys_or_null(tm);

 	status = efi_thunk(set_time, phys_tm);

@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,

 	spin_lock(&rtc_lock);

-	phys_enabled = virt_to_phys(enabled);
-	phys_pending = virt_to_phys(pending);
-	phys_tm = virt_to_phys(tm);
+	phys_enabled = virt_to_phys_or_null(enabled);
+	phys_pending = virt_to_phys_or_null(pending);
+	phys_tm = virt_to_phys_or_null(tm);

 	status = efi_thunk(get_wakeup_time, phys_enabled,
 			     phys_pending, phys_tm);
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)

 	spin_lock(&rtc_lock);

-	phys_tm = virt_to_phys(tm);
+	phys_tm = virt_to_phys_or_null(tm);

 	status = efi_thunk(set_wakeup_time, enabled, phys_tm);

@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 	return status;
 }

+static unsigned long efi_name_size(efi_char16_t *name)
+{
+	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
+}

 static efi_status_t
 efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
 	u32 phys_name, phys_vendor, phys_attr;
 	u32 phys_data_size, phys_data;

-	phys_data_size = virt_to_phys(data_size);
-	phys_vendor = virt_to_phys(vendor);
-	phys_name = virt_to_phys(name);
-	phys_attr = virt_to_phys(attr);
-	phys_data = virt_to_phys(data);
+	phys_data_size = virt_to_phys_or_null(data_size);
+	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+	phys_attr = virt_to_phys_or_null(attr);
+	phys_data = virt_to_phys_or_null_size(data, *data_size);

 	status = efi_thunk(get_variable, phys_name, phys_vendor,
 			   phys_attr, phys_data_size, phys_data);
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 	u32 phys_name, phys_vendor, phys_data;
 	efi_status_t status;

-	phys_name = virt_to_phys(name);
-	phys_vendor = virt_to_phys(vendor);
-	phys_data = virt_to_phys(data);
+	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_data = virt_to_phys_or_null_size(data, data_size);

 	/* If data_size is > sizeof(u32) we've got problems */
 	status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
 	efi_status_t status;
 	u32 phys_name_size, phys_name, phys_vendor;

-	phys_name_size = virt_to_phys(name_size);
-	phys_vendor = virt_to_phys(vendor);
-	phys_name = virt_to_phys(name);
+	phys_name_size = virt_to_phys_or_null(name_size);
+	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_name = virt_to_phys_or_null_size(name, *name_size);

 	status = efi_thunk(get_next_variable, phys_name_size,
 			   phys_name, phys_vendor);
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count)
 	efi_status_t status;
 	u32 phys_count;

-	phys_count = virt_to_phys(count);
+	phys_count = virt_to_phys_or_null(count);
 	status = efi_thunk(get_next_high_mono_count, phys_count);

 	return status;
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
 {
 	u32 phys_data;

-	phys_data = virt_to_phys(data);
+	phys_data = virt_to_phys_or_null_size(data, data_size);

 	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
 }
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
 	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
 		return EFI_UNSUPPORTED;

-	phys_storage = virt_to_phys(storage_space);
-	phys_remaining = virt_to_phys(remaining_space);
-	phys_max = virt_to_phys(max_variable_size);
+	phys_storage = virt_to_phys_or_null(storage_space);
+	phys_remaining = virt_to_phys_or_null(remaining_space);
+	phys_max = virt_to_phys_or_null(max_variable_size);

 	status = efi_thunk(query_variable_info, attr, phys_storage,
 			   phys_remaining, phys_max);
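Editor's note: the virt_to_phys_or_null() and virt_to_phys_or_null_size() helpers used throughout these hunks are introduced earlier in the same patch (not shown in this excerpt). A minimal sketch of what such a helper plausibly looks like, assuming its job is to tolerate NULL optional arguments and stack variables that live outside the linear map when CONFIG_VMAP_STACK is enabled:

	/* Sketch only -- not the verbatim helper from the patch. */
	static inline phys_addr_t
	virt_to_phys_or_null_size(void *va, unsigned long size)
	{
		if (!va)
			return 0;	/* NULL maps to a NULL physical address */

		if (virt_addr_valid(va))
			return virt_to_phys(va);	/* linear-map address: fast path */

		/* e.g. a vmalloc'd stack: walk the page tables instead */
		WARN_ON(!IS_ALIGNED((unsigned long)va, size));
		return slow_virt_to_phys(va);
	}

The size argument would let the helper warn when an object could straddle a page boundary, since only the first page's mapping is translated.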

+ 19 - 0
arch/x86/platform/intel-mid/pwr.c

@@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
 }
 EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);

+pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
+{
+	struct mid_pwr *pwr = midpwr;
+	int id, reg, bit;
+	u32 power;
+
+	if (!pwr || !pwr->available)
+		return PCI_UNKNOWN;
+
+	id = intel_mid_pwr_get_lss_id(pdev);
+	if (id < 0)
+		return PCI_UNKNOWN;
+
+	reg = (id * LSS_PWS_BITS) / 32;
+	bit = (id * LSS_PWS_BITS) % 32;
+	power = mid_pwr_get_state(pwr, reg);
+	return (__force pci_power_t)((power >> bit) & 3);
+}
+
 void intel_mid_pwr_power_off(void)
 {
 	struct mid_pwr *pwr = midpwr;
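A worked example of the bitfield math in intel_mid_pci_get_power_state() above, assuming LSS_PWS_BITS is 2 (two power-state bits per logical subsystem, i.e. 16 subsystems per 32-bit status register; the constant is defined elsewhere in this driver):

	/* Illustration only: LSS id 21 with LSS_PWS_BITS == 2 (assumed) */
	int id  = 21;
	int reg = (id * 2) / 32;	/* -> 1: second status register */
	int bit = (id * 2) % 32;	/* -> 10: bit offset within it  */
	/* state = (mid_pwr_get_state(pwr, reg) >> bit) & 3; */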

+ 2 - 8
drivers/acpi/acpi_apd.c

@@ -122,7 +122,7 @@ static int acpi_apd_create_device(struct acpi_device *adev,
 	int ret;

 	if (!dev_desc) {
-		pdev = acpi_create_platform_device(adev);
+		pdev = acpi_create_platform_device(adev, NULL);
 		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
 	}

@@ -139,14 +139,8 @@ static int acpi_apd_create_device(struct acpi_device *adev,
 			goto err_out;
 	}

-	if (dev_desc->properties) {
-		ret = device_add_properties(&adev->dev, dev_desc->properties);
-		if (ret)
-			goto err_out;
-	}
-
 	adev->driver_data = pdata;
-	pdev = acpi_create_platform_device(adev);
+	pdev = acpi_create_platform_device(adev, dev_desc->properties);
 	if (!IS_ERR_OR_NULL(pdev))
 		return 1;

+ 2 - 8
drivers/acpi/acpi_lpss.c

@@ -395,7 +395,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,

 	dev_desc = (const struct lpss_device_desc *)id->driver_data;
 	if (!dev_desc) {
-		pdev = acpi_create_platform_device(adev);
+		pdev = acpi_create_platform_device(adev, NULL);
 		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
 	}
 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -451,14 +451,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 		goto err_out;
 	}

-	if (dev_desc->properties) {
-		ret = device_add_properties(&adev->dev, dev_desc->properties);
-		if (ret)
-			goto err_out;
-	}
-
 	adev->driver_data = pdata;
-	pdev = acpi_create_platform_device(adev);
+	pdev = acpi_create_platform_device(adev, dev_desc->properties);
 	if (!IS_ERR_OR_NULL(pdev)) {
 		return 1;
 	}

+ 4 - 1
drivers/acpi/acpi_platform.c

@@ -50,6 +50,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
 /**
  * acpi_create_platform_device - Create platform device for ACPI device node
  * @adev: ACPI device node to create a platform device for.
+ * @properties: Optional collection of built-in properties.
  *
  * Check if the given @adev can be represented as a platform device and, if
  * that's the case, create and register a platform device, populate its common
@@ -57,7 +58,8 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
  *
  * Name of the platform device will be the same as @adev's.
  */
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
+					struct property_entry *properties)
 {
 	struct platform_device *pdev = NULL;
 	struct platform_device_info pdevinfo;
@@ -106,6 +108,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
 	pdevinfo.res = resources;
 	pdevinfo.num_res = count;
 	pdevinfo.fwnode = acpi_fwnode_handle(adev);
+	pdevinfo.properties = properties;

 	if (acpi_dma_supported(adev))
 		pdevinfo.dma_mask = DMA_BIT_MASK(32);
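With the new @properties argument, a caller can attach a property set in one step instead of calling device_add_properties() separately, which is exactly what the acpi_apd.c and acpi_lpss.c hunks above switch to. A hypothetical caller (property name and value invented for illustration):

	static const struct property_entry foo_properties[] = {
		PROPERTY_ENTRY_U32("clock-frequency", 48000000),
		{ }	/* empty sentinel terminates the array */
	};

	pdev = acpi_create_platform_device(adev, foo_properties);
	/* passing NULL instead keeps the old behaviour */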

+ 2 - 2
drivers/acpi/dptf/int340x_thermal.c

@@ -34,11 +34,11 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
 					const struct acpi_device_id *id)
 {
 	if (IS_ENABLED(CONFIG_INT340X_THERMAL))
-		acpi_create_platform_device(adev);
+		acpi_create_platform_device(adev, NULL);
 	/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
 	else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
 		 id->driver_data == INT3401_DEVICE)
-		acpi_create_platform_device(adev);
+		acpi_create_platform_device(adev, NULL);
 	return 1;
 }

+ 1 - 1
drivers/acpi/scan.c

@@ -1734,7 +1734,7 @@ static void acpi_default_enumeration(struct acpi_device *device)
 			       &is_spi_i2c_slave);
 	acpi_dev_free_resource_list(&resource_list);
 	if (!is_spi_i2c_slave) {
-		acpi_create_platform_device(device);
+		acpi_create_platform_device(device, NULL);
 		acpi_device_set_enumerated(device);
 	} else {
 		blocking_notifier_call_chain(&acpi_reconfig_chain,

+ 3 - 2
drivers/base/dd.c

@@ -324,7 +324,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 {
 	int ret = -EPROBE_DEFER;
 	int local_trigger_count = atomic_read(&deferred_trigger_count);
-	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE);
+	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
+			   !drv->suppress_bind_attrs;

 	if (defer_all_probes) {
 		/*
@@ -383,7 +384,7 @@ re_probe:
 	if (test_remove) {
 		test_remove = false;

-		if (dev->bus && dev->bus->remove)
+		if (dev->bus->remove)
 			dev->bus->remove(dev);
 		else if (drv->remove)
 			drv->remove(dev);

+ 4 - 4
drivers/base/power/main.c

@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
 	TRACE_DEVICE(dev);
 	TRACE_SUSPEND(0);

+	dpm_wait_for_children(dev, async);
+
 	if (async_error)
 		goto Complete;

@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;

-	dpm_wait_for_children(dev, async);
-
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as

 	__pm_runtime_disable(dev, false);

+	dpm_wait_for_children(dev, async);
+
 	if (async_error)
 		goto Complete;

@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;

-	dpm_wait_for_children(dev, async);
-
 	if (dev->pm_domain) {
 		info = "late power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);

+ 0 - 41
drivers/block/aoe/aoecmd.c

@@ -853,45 +853,6 @@ rqbiocnt(struct request *r)
 	return n;
 }

-/* This can be removed if we are certain that no users of the block
- * layer will ever use zero-count pages in bios.  Otherwise we have to
- * protect against the put_page sometimes done by the network layer.
- *
- * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
- * discussion.
- *
- * We cannot use get_page in the workaround, because it insists on a
- * positive page count as a precondition.  So we use _refcount directly.
- */
-static void
-bio_pageinc(struct bio *bio)
-{
-	struct bio_vec bv;
-	struct page *page;
-	struct bvec_iter iter;
-
-	bio_for_each_segment(bv, bio, iter) {
-		/* Non-zero page count for non-head members of
-		 * compound pages is no longer allowed by the kernel.
-		 */
-		page = compound_head(bv.bv_page);
-		page_ref_inc(page);
-	}
-}
-
-static void
-bio_pagedec(struct bio *bio)
-{
-	struct page *page;
-	struct bio_vec bv;
-	struct bvec_iter iter;
-
-	bio_for_each_segment(bv, bio, iter) {
-		page = compound_head(bv.bv_page);
-		page_ref_dec(page);
-	}
-}
-
 static void
 bufinit(struct buf *buf, struct request *rq, struct bio *bio)
 {
@@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
 	buf->rq = rq;
 	buf->bio = bio;
 	buf->iter = bio->bi_iter;
-	bio_pageinc(bio);
 }

 static struct buf *
@@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf)
 	if (buf == d->ip.buf)
 		d->ip.buf = NULL;
 	rq = buf->rq;
-	bio_pagedec(buf->bio);
 	mempool_free(buf, d->bufpool);
 	n = (unsigned long) rq->special;
 	rq->special = (void *) --n;

+ 1 - 1
drivers/block/drbd/drbd_main.c

@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
 		drbd_update_congested(connection);
 	}
 	do {
-		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
+		rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
 		if (rv == -EAGAIN) {
 			if (we_should_drop_the_connection(connection, sock))
 				break;
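The fix matters because drbd_send() retries partial sends inside this loop: after a short write the iovec base and length are advanced, so each retry must pass the remaining iov.iov_len rather than the original total size. A simplified sketch of the idiom (error handling elided):

	do {
		rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		if (rv <= 0)
			break;
		iov.iov_base += rv;	/* consume what was sent */
		iov.iov_len  -= rv;	/* and retry only the remainder */
	} while (iov.iov_len > 0);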

+ 1 - 1
drivers/block/nbd.c

@@ -599,7 +599,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 			return -EINVAL;

 		sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
-		if (!sreq)
+		if (IS_ERR(sreq))
 			return -ENOMEM;

 		mutex_unlock(&nbd->tx_lock);
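blk_mq_alloc_request() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so the old !sreq test could never fire. The general idiom for such APIs (q standing for the target request queue):

	struct request *rq = blk_mq_alloc_request(q, WRITE, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* or a fixed errno, as this caller does */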

+ 0 - 3
drivers/char/ppdev.c

@@ -748,10 +748,7 @@ static int pp_release(struct inode *inode, struct file *file)
 	}

 	if (pp->pdev) {
-		const char *name = pp->pdev->name;
-
 		parport_unregister_device(pp->pdev);
-		kfree(name);
 		pp->pdev = NULL;
 		pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
 	}

+ 8 - 5
drivers/clk/clk-qoriq.c

@@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
 					     struct mux_hwclock *hwc,
 					     const struct clk_ops *ops,
 					     unsigned long min_rate,
+					     unsigned long max_rate,
 					     unsigned long pct80_rate,
 					     const char *fmt, int idx)
 {
@@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
 			continue;
 		if (rate < min_rate)
 			continue;
+		if (rate > max_rate)
+			continue;

 		parent_names[j] = div->name;
 		hwc->parent_to_clksel[j] = i;
@@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
 	struct mux_hwclock *hwc;
 	const struct clockgen_pll_div *div;
 	unsigned long plat_rate, min_rate;
-	u64 pct80_rate;
+	u64 max_rate, pct80_rate;
 	u32 clksel;

 	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
@@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
 		return NULL;
 	}

-	pct80_rate = clk_get_rate(div->clk);
-	pct80_rate *= 8;
+	max_rate = clk_get_rate(div->clk);
+	pct80_rate = max_rate * 8;
 	do_div(pct80_rate, 10);

 	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
@@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
 	else
 		min_rate = plat_rate / 2;

-	return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
 				 pct80_rate, "cg-cmux%d", idx);
 }

@@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
 	hwc->reg = cg->regs + 0x20 * idx + 0x10;
 	hwc->info = cg->info.hwaccel[idx];

-	return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
 				 "cg-hwaccel%d", idx);
 }

+ 4 - 6
drivers/clk/clk-xgene.c

@@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw)
 	struct xgene_clk *pclk = to_xgene_clk(hw);
 	unsigned long flags = 0;
 	u32 data;
-	phys_addr_t reg;

 	if (pclk->lock)
 		spin_lock_irqsave(pclk->lock, flags);

 	if (pclk->param.csr_reg != NULL) {
 		pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
-		reg = __pa(pclk->param.csr_reg);
 		/* First enable the clock */
 		data = xgene_clk_read(pclk->param.csr_reg +
 					pclk->param.reg_clk_offset);
 		data |= pclk->param.reg_clk_mask;
 		xgene_clk_write(data, pclk->param.csr_reg +
 					pclk->param.reg_clk_offset);
-		pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
-			clk_hw_get_name(hw), &reg,
+		pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
+			clk_hw_get_name(hw),
 			pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
 			data);

@@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw)
 		data &= ~pclk->param.reg_csr_mask;
 		xgene_clk_write(data, pclk->param.csr_reg +
 					pclk->param.reg_csr_offset);
-		pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
-			clk_hw_get_name(hw), &reg,
+		pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
+			clk_hw_get_name(hw),
 			pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
 			data);
 	}

+ 6 - 2
drivers/clk/imx/clk-pllv3.c

@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
 	temp64 *= mfn;
 	do_div(temp64, mfd);

-	return (parent_rate * div) + (u32)temp64;
+	return parent_rate * div + (unsigned long)temp64;
 }

 static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
 	do_div(temp64, parent_rate);
 	mfn = temp64;

-	return parent_rate * div + parent_rate * mfn / mfd;
+	temp64 = (u64)parent_rate;
+	temp64 *= mfn;
+	do_div(temp64, mfd);
+
+	return parent_rate * div + (unsigned long)temp64;
 }

 static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
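Both hunks above cure the same 32-bit overflow: parent_rate * mfn is formed in an unsigned long, which can wrap on 32-bit builds, so the product is widened to u64 and divided with do_div(), which stores the quotient back into its first argument. The pattern, restated with the variable names used above:

	u64 temp64 = (u64)parent_rate;
	temp64 *= mfn;			/* 64-bit product cannot wrap */
	do_div(temp64, mfd);		/* temp64 = parent_rate * mfn / mfd */
	/* rate = parent_rate * div + (unsigned long)temp64; */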

+ 1 - 1
drivers/clk/mmp/clk-of-mmp2.c

@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
 	}

 	pxa_unit->apmu_base = of_iomap(np, 1);
-	if (!pxa_unit->mpmu_base) {
+	if (!pxa_unit->apmu_base) {
 		pr_err("failed to map apmu registers\n");
 		return;
 	}

+ 1 - 1
drivers/clk/mmp/clk-of-pxa168.c

@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
 	}

 	pxa_unit->apmu_base = of_iomap(np, 1);
-	if (!pxa_unit->mpmu_base) {
+	if (!pxa_unit->apmu_base) {
 		pr_err("failed to map apmu registers\n");
 		return;
 	}

+ 2 - 2
drivers/clk/mmp/clk-of-pxa910.c

@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
 	}

 	pxa_unit->apmu_base = of_iomap(np, 1);
-	if (!pxa_unit->mpmu_base) {
+	if (!pxa_unit->apmu_base) {
 		pr_err("failed to map apmu registers\n");
 		return;
 	}
@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
 	}

 	pxa_unit->apbcp_base = of_iomap(np, 3);
-	if (!pxa_unit->mpmu_base) {
+	if (!pxa_unit->apbcp_base) {
 		pr_err("failed to map apbcp registers\n");
 		return;
 	}

+ 1 - 4
drivers/clk/rockchip/clk-ddr.c

@@ -144,11 +144,8 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
 	ddrclk->ddr_flag = ddr_flag;

 	clk = clk_register(NULL, &ddrclk->hw);
-	if (IS_ERR(clk)) {
-		pr_err("%s: could not register ddrclk %s\n", __func__,	name);
+	if (IS_ERR(clk))
 		kfree(ddrclk);
-		return NULL;
-	}

 	return clk;
 }

+ 14 - 8
drivers/clk/samsung/clk-exynos-clkout.c

@@ -132,28 +132,34 @@ free_clkout:
 	pr_err("%s: failed to register clkout clock\n", __func__);
 }

+/*
+ * We use the CLK_OF_DECLARE_DRIVER initialization method to avoid setting
+ * the OF_POPULATED flag on the pmu device tree node, so later the
+ * Exynos PMU platform device can be properly probed with the PMU driver.
+ */
+
 static void __init exynos4_clkout_init(struct device_node *node)
 {
 	exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
 }
-CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
 		exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
 		exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
 		exynos4_clkout_init);
-CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
 		exynos4_clkout_init);

 static void __init exynos5_clkout_init(struct device_node *node)
 {
 	exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
 }
-CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
 		exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
 		exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
 		exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
 		exynos5_clkout_init);

+ 4 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c

@@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle)
 {
 	int i, ret;
 	struct device *dev;
-
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+	/* return early if no ACP */
+	if (!adev->acp.acp_genpd)
+		return 0;
+
 	for (i = 0; i < ACP_DEVS ; i++) {
 		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
 		ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);

+ 11 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c

@@ -795,10 +795,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		if (!adev->pm.fw) {
 			switch (adev->asic_type) {
 			case CHIP_TOPAZ:
-				strcpy(fw_name, "amdgpu/topaz_smc.bin");
+				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
+				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
+				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
+					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+				else
+					strcpy(fw_name, "amdgpu/topaz_smc.bin");
 				break;
 			case CHIP_TONGA:
-				strcpy(fw_name, "amdgpu/tonga_smc.bin");
+				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
+				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
+					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+				else
+					strcpy(fw_name, "amdgpu/tonga_smc.bin");
 				break;
 			case CHIP_FIJI:
 				strcpy(fw_name, "amdgpu/fiji_smc.bin");

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c

@@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

-	if (amdgpu_connector->ddc_bus->has_aux) {
+	if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) {
 		drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
 		amdgpu_connector->ddc_bus->has_aux = false;
 	}

+ 24 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -735,8 +735,20 @@ static struct pci_driver amdgpu_kms_pci_driver = {

 static int __init amdgpu_init(void)
 {
-	amdgpu_sync_init();
-	amdgpu_fence_slab_init();
+	int r;
+
+	r = amdgpu_sync_init();
+	if (r)
+		goto error_sync;
+
+	r = amdgpu_fence_slab_init();
+	if (r)
+		goto error_fence;
+
+	r = amd_sched_fence_slab_init();
+	if (r)
+		goto error_sched;
+
 	if (vgacon_text_force()) {
 		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
 		return -EINVAL;
@@ -748,6 +760,15 @@ static int __init amdgpu_init(void)
 	amdgpu_register_atpx_handler();
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
+
+error_sched:
+	amdgpu_fence_slab_fini();
+
+error_fence:
+	amdgpu_sync_fini();
+
+error_sync:
+	return r;
 }

 static void __exit amdgpu_exit(void)
@@ -756,6 +777,7 @@ static void __exit amdgpu_exit(void)
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
+	amd_sched_fence_slab_fini();
 	amdgpu_fence_slab_fini();
 }

+ 2 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)

 	if ((amdgpu_runtime_pm != 0) &&
 	    amdgpu_has_atpx() &&
+	    (amdgpu_is_atpx_hybrid() ||
+	     amdgpu_has_atpx_dgpu_power_cntl()) &&
 	    ((flags & AMD_IS_APU) == 0))
 		flags |= AMD_IS_PX;

+ 2 - 0
drivers/gpu/drm/amd/amdgpu/vi.c

@@ -80,7 +80,9 @@
 #include "dce_virtual.h"

 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");

+ 1 - 1
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c

@@ -272,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw
 	PHM_FUNC_CHECK(hwmgr);

 	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
-		return -EINVAL;
+		return false;

 	return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
 }

+ 4 - 2
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c

@@ -710,8 +710,10 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 	uint32_t vol;
 	int ret = 0;

-	if (hwmgr->chip_id < CHIP_POLARIS10) {
-		atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+	if (hwmgr->chip_id < CHIP_TONGA) {
+		ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+	} else if (hwmgr->chip_id < CHIP_POLARIS10) {
+		ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
 		if (*voltage >= 2000 || *voltage == 0)
 			*voltage = 1150;
 	} else {

+ 45 - 25
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c

@@ -1460,19 +1460,19 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;


-	if (table_info == NULL)
-		return -EINVAL;
-
-	sclk_table = table_info->vdd_dep_on_sclk;
-
 	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
 		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

 		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
-			if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+			if ((hwmgr->pp_table_version == PP_TABLE_V1)
+			    && !phm_get_sclk_for_voltage_evv(hwmgr,
 						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
 				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 							PHM_PlatformCaps_ClockStretcher)) {
+					if (table_info == NULL)
+						return -EINVAL;
+					sclk_table = table_info->vdd_dep_on_sclk;
+
 					for (j = 1; j < sclk_table->count; j++) {
 						if (sclk_table->entries[j].clk == sclk &&
 								sclk_table->entries[j].cks_enable == 0) {
@@ -1498,12 +1498,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
 				}
 			}
 		} else {
-
 			if ((hwmgr->pp_table_version == PP_TABLE_V0)
 				|| !phm_get_sclk_for_voltage_evv(hwmgr,
 					table_info->vddc_lookup_table, vv_id, &sclk)) {
 				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 						PHM_PlatformCaps_ClockStretcher)) {
+					if (table_info == NULL)
+						return -EINVAL;
+					sclk_table = table_info->vdd_dep_on_sclk;
+
 					for (j = 1; j < sclk_table->count; j++) {
 						if (sclk_table->entries[j].clk == sclk &&
 								sclk_table->entries[j].cks_enable == 0) {
@@ -2133,9 +2136,11 @@ static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 	if (tab) {
+		vddc = tab->vddc;
 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
 						   &data->vddc_leakage);
 		tab->vddc = vddc;
+		vddci = tab->vddci;
 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
 						   &data->vddci_leakage);
 		tab->vddci = vddci;
@@ -4228,18 +4233,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 {
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)hwmgr->pptable;
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
+	struct phm_clock_voltage_dependency_table *sclk_table;
 	int i;

-	if (table_info == NULL)
-		return -EINVAL;
-
-	dep_sclk_table = table_info->vdd_dep_on_sclk;
-
-	for (i = 0; i < dep_sclk_table->count; i++) {
-		clocks->clock[i] = dep_sclk_table->entries[i].clk;
-		clocks->count++;
+	if (hwmgr->pp_table_version == PP_TABLE_V1) {
+		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
+			return -EINVAL;
+		dep_sclk_table = table_info->vdd_dep_on_sclk;
+		for (i = 0; i < dep_sclk_table->count; i++) {
+			clocks->clock[i] = dep_sclk_table->entries[i].clk;
+			clocks->count++;
+		}
+	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+		for (i = 0; i < sclk_table->count; i++) {
+			clocks->clock[i] = sclk_table->entries[i].clk;
+			clocks->count++;
+		}
 	}
+
 	return 0;
 }

@@ -4261,17 +4274,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 			(struct phm_ppt_v1_information *)hwmgr->pptable;
 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
 	int i;
+	struct phm_clock_voltage_dependency_table *mclk_table;

-	if (table_info == NULL)
-		return -EINVAL;
-
-	dep_mclk_table = table_info->vdd_dep_on_mclk;
-
-	for (i = 0; i < dep_mclk_table->count; i++) {
-		clocks->clock[i] = dep_mclk_table->entries[i].clk;
-		clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+	if (hwmgr->pp_table_version == PP_TABLE_V1) {
+		if (table_info == NULL)
+			return -EINVAL;
+		dep_mclk_table = table_info->vdd_dep_on_mclk;
+		for (i = 0; i < dep_mclk_table->count; i++) {
+			clocks->clock[i] = dep_mclk_table->entries[i].clk;
+			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
 						dep_mclk_table->entries[i].clk);
-		clocks->count++;
+			clocks->count++;
+		}
+	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+		for (i = 0; i < mclk_table->count; i++) {
+			clocks->clock[i] = mclk_table->entries[i].clk;
+			clocks->count++;
+		}
 	}
 	return 0;
 }

+ 3 - 3
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c

@@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
 		struct phm_fan_speed_info *fan_speed_info)
 {
 	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
+		return -ENODEV;

 	fan_speed_info->supports_percent_read = true;
 	fan_speed_info->supports_percent_write = true;
@@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
 	uint64_t tmp64;

 	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
+		return -ENODEV;

 	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_FDO_CTRL1, FMAX_DUTY100);
@@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
 	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
 			(hwmgr->thermal_controller.fanInfo.
 				ucTachometerPulsesPerRevolution == 0))
-		return 0;
+		return -ENODEV;

 	tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_TACH_STATUS, TACH_PERIOD);

+ 0 - 13
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);

-struct kmem_cache *sched_fence_slab;
-atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
-
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -618,13 +615,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
-	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
-		sched_fence_slab = kmem_cache_create(
-			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
-			SLAB_HWCACHE_ALIGN, NULL);
-		if (!sched_fence_slab)
-			return -ENOMEM;
-	}

 	/* Each scheduler will run on a separate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -645,7 +635,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
-	rcu_barrier();
-	if (atomic_dec_and_test(&sched_fence_slab_ref))
-		kmem_cache_destroy(sched_fence_slab);
 }

+ 3 - 3
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

@@ -30,9 +30,6 @@
 struct amd_gpu_scheduler;
 struct amd_sched_rq;

-extern struct kmem_cache *sched_fence_slab;
-extern atomic_t sched_fence_slab_ref;
-
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity);
 void amd_sched_entity_push_job(struct amd_sched_job *sched_job);

+int amd_sched_fence_slab_init(void);
+void amd_sched_fence_slab_fini(void);
+
 struct amd_sched_fence *amd_sched_fence_create(
 	struct amd_sched_entity *s_entity, void *owner);
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);

+ 19 - 0
drivers/gpu/drm/amd/scheduler/sched_fence.c

@@ -27,6 +27,25 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"

+static struct kmem_cache *sched_fence_slab;
+
+int amd_sched_fence_slab_init(void)
+{
+	sched_fence_slab = kmem_cache_create(
+		"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!sched_fence_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void amd_sched_fence_slab_fini(void)
+{
+	rcu_barrier();
+	kmem_cache_destroy(sched_fence_slab);
+}
+
 struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
 					       void *owner)
 {
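One detail worth noting in amd_sched_fence_slab_fini() above: scheduler fences are released from an RCU callback, so rcu_barrier() must run before kmem_cache_destroy() to flush any callbacks that still free objects into the slab. The general shape (callback name hypothetical, for illustration only):

	call_rcu(&fence->rcu, fence_free_cb);	/* frees into sched_fence_slab */
	...
	rcu_barrier();				/* wait for all pending callbacks */
	kmem_cache_destroy(sched_fence_slab);	/* now safe to tear down */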

+ 17 - 3
drivers/gpu/drm/i915/i915_gem.c

@@ -1806,7 +1806,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 		/* Use a partial view if it is bigger than available space */
 		chunk_size = MIN_CHUNK_PAGES;
 		if (i915_gem_object_is_tiled(obj))
-			chunk_size = max(chunk_size, tile_row_pages(obj));
+			chunk_size = roundup(chunk_size, tile_row_pages(obj));

 		memset(&view, 0, sizeof(view));
 		view.type = I915_GGTT_VIEW_PARTIAL;
@@ -3543,8 +3543,22 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	if (view->type == I915_GGTT_VIEW_NORMAL)
 		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
-	if (IS_ERR(vma))
-		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
+	if (IS_ERR(vma)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		unsigned int flags;
+
+		/* Valleyview is definitely limited to scanning out the first
+		 * 512MiB. Let's presume this behaviour was inherited from the
+		 * g4x display engine and that all earlier gen are similarly
+		 * limited. Testing suggests that it is a little more
+		 * complicated than this. For example, Cherryview appears quite
+		 * happy to scanout from anywhere within its global aperture.
+		 */
+		flags = 0;
+		if (HAS_GMCH_DISPLAY(i915))
+			flags = PIN_MAPPABLE;
+		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
+	}
 	if (IS_ERR(vma))
 		goto err_unpin_display;
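Why roundup() rather than max() in the first hunk above: the partial view must cover whole tile rows, so the chunk size has to be a multiple of tile_row_pages(obj), not merely at least that large. With invented values:

	/* assume tile_row_pages(obj) == 12 and MIN_CHUNK_PAGES == 32 */
	max(32, 12)	/* -> 32: not a multiple of 12, splits a tile row */
	roundup(32, 12)	/* -> 36: always a whole number of tile rows      */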

+ 26 - 3
drivers/gpu/drm/i915/intel_display.c

@@ -10243,6 +10243,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 	bxt_set_cdclk(to_i915(dev), req_cdclk);
 }

+static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+					  int pixel_rate)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+	if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+		pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+	/* BSpec says "Do not use DisplayPort with CDCLK less than
+	 * 432 MHz, audio enabled, port width x4, and link rate
+	 * HBR2 (5.4 GHz), or else there may be audio corruption or
+	 * screen corruption."
+	 */
+	if (intel_crtc_has_dp_encoder(crtc_state) &&
+	    crtc_state->has_audio &&
+	    crtc_state->port_clock >= 540000 &&
+	    crtc_state->lane_count == 4)
+		pixel_rate = max(432000, pixel_rate);
+
+	return pixel_rate;
+}
+
 /* compute the max rate for new configuration */
 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 {
@@ -10268,9 +10291,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)

 		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

-		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
-			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+		if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+			pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
+								    pixel_rate);

 		intel_state->min_pixclk[i] = pixel_rate;
 	}
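A worked example of the IPS headroom applied in bdw_adjust_min_pipe_pixel_rate() above: with IPS enabled on Broadwell the pixel rate may use at most 95% of cdclk, so the minimum cdclk is the pixel rate scaled by 100/95 and rounded up, e.g. (rate invented for illustration) DIV_ROUND_UP(450000 * 100, 95) = 473685 kHz.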

+ 48 - 36
drivers/gpu/drm/i915/intel_hdmi.c

@@ -1799,6 +1799,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
 	intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 }

+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+			     enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	u8 ddc_pin;
+
+	if (info->alternate_ddc_pin) {
+		DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+			      info->alternate_ddc_pin, port_name(port));
+		return info->alternate_ddc_pin;
+	}
+
+	switch (port) {
+	case PORT_B:
+		if (IS_BROXTON(dev_priv))
+			ddc_pin = GMBUS_PIN_1_BXT;
+		else
+			ddc_pin = GMBUS_PIN_DPB;
+		break;
+	case PORT_C:
+		if (IS_BROXTON(dev_priv))
+			ddc_pin = GMBUS_PIN_2_BXT;
+		else
+			ddc_pin = GMBUS_PIN_DPC;
+		break;
+	case PORT_D:
+		if (IS_CHERRYVIEW(dev_priv))
+			ddc_pin = GMBUS_PIN_DPD_CHV;
+		else
+			ddc_pin = GMBUS_PIN_DPD;
+		break;
+	default:
+		MISSING_CASE(port);
+		ddc_pin = GMBUS_PIN_DPB;
+		break;
+	}
+
+	DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+		      ddc_pin, port_name(port));
+
+	return ddc_pin;
+}
+
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			       struct intel_connector *intel_connector)
 {
@@ -1808,7 +1852,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_dig_port->port;
-	uint8_t alternate_ddc_pin;

 	DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
 		      port_name(port));
@@ -1826,12 +1869,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	connector->doublescan_allowed = 0;
 	connector->stereo_allowed = 1;

+	intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+
 	switch (port) {
 	case PORT_B:
-		if (IS_BROXTON(dev_priv))
-			intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
-		else
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
 		/*
 		 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
 		 * interrupts to check the external panel connection.
@@ -1842,46 +1883,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			intel_encoder->hpd_pin = HPD_PORT_B;
 		break;
 	case PORT_C:
-		if (IS_BROXTON(dev_priv))
-			intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
-		else
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
 		intel_encoder->hpd_pin = HPD_PORT_C;
 		break;
 	case PORT_D:
-		if (WARN_ON(IS_BROXTON(dev_priv)))
-			intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
-		else if (IS_CHERRYVIEW(dev_priv))
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
-		else
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
 	case PORT_E:
-		/* On SKL PORT E doesn't have seperate GMBUS pin
-		 *  We rely on VBT to set a proper alternate GMBUS pin. */
-		alternate_ddc_pin =
-			dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
-		switch (alternate_ddc_pin) {
-		case DDC_PIN_B:
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
-			break;
-		case DDC_PIN_C:
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
-			break;
-		case DDC_PIN_D:
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
-			break;
-		default:
-			MISSING_CASE(alternate_ddc_pin);
-		}
 		intel_encoder->hpd_pin = HPD_PORT_E;
 		break;
-	case PORT_A:
-		intel_encoder->hpd_pin = HPD_PORT_A;
-		/* Internal port only for eDP. */
 	default:
-		BUG();
+		MISSING_CASE(port);
+		return;
 	}

 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {

+ 3 - 1
drivers/gpu/drm/i915/intel_runtime_pm.c

@@ -1139,7 +1139,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)

 	intel_power_sequencer_reset(dev_priv);

-	intel_hpd_poll_init(dev_priv);
+	/* Prevent us from re-enabling polling by accident in late suspend */
+	if (!dev_priv->drm.dev->power.is_suspended)
+		intel_hpd_poll_init(dev_priv);
 }

 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,

+ 6 - 3
drivers/gpu/drm/imx/ipuv3-crtc.c

@@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,

 	ipu_dc_disable_channel(ipu_crtc->dc);
 	ipu_di_disable(ipu_crtc->di);
+	/*
+	 * Planes must be disabled before DC clock is removed, as otherwise the
+	 * attached IDMACs will be left in undefined state, possibly hanging
+	 * the IPU or even system.
+	 */
+	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 	ipu_dc_disable(ipu);

 	spin_lock_irq(&crtc->dev->event_lock);
@@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
 	}
 	spin_unlock_irq(&crtc->dev->event_lock);

-	/* always disable planes on the CRTC */
-	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
-
 	drm_crtc_vblank_off(crtc);
 }
 
 

+ 12 - 2
drivers/gpu/drm/msm/dsi/dsi_host.c

@@ -139,6 +139,7 @@ struct msm_dsi_host {
 
 
 	u32 err_work_state;
 	u32 err_work_state;
 	struct work_struct err_work;
 	struct work_struct err_work;
+	struct work_struct hpd_work;
 	struct workqueue_struct *workqueue;
 	struct workqueue_struct *workqueue;
 
 
 	/* DSI 6G TX buffer*/
 	/* DSI 6G TX buffer*/
@@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
 	wmb();	/* make sure dsi controller enabled again */
 	wmb();	/* make sure dsi controller enabled again */
 }
 }
 
 
+static void dsi_hpd_worker(struct work_struct *work)
+{
+	struct msm_dsi_host *msm_host =
+		container_of(work, struct msm_dsi_host, hpd_work);
+
+	drm_helper_hpd_irq_event(msm_host->dev);
+}
+
 static void dsi_err_worker(struct work_struct *work)
 static void dsi_err_worker(struct work_struct *work)
 {
 {
 	struct msm_dsi_host *msm_host =
 	struct msm_dsi_host *msm_host =
@@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
 
 
 	DBG("id=%d", msm_host->id);
 	DBG("id=%d", msm_host->id);
 	if (msm_host->dev)
 	if (msm_host->dev)
-		drm_helper_hpd_irq_event(msm_host->dev);
+		queue_work(msm_host->workqueue, &msm_host->hpd_work);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
 
 
 	DBG("id=%d", msm_host->id);
 	DBG("id=%d", msm_host->id);
 	if (msm_host->dev)
 	if (msm_host->dev)
-		drm_helper_hpd_irq_event(msm_host->dev);
+		queue_work(msm_host->workqueue, &msm_host->hpd_work);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 	/* setup workqueue */
 	/* setup workqueue */
 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
+	INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
 
 
 	msm_dsi->host = &msm_host->base;
 	msm_dsi->host = &msm_host->base;
 	msm_dsi->id = msm_host->id;
 	msm_dsi->id = msm_host->id;

+ 1 - 0
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c

@@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
 		.parent_names = (const char *[]){ "xo" },
 		.num_parents = 1,
 		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
 		.ops = &clk_ops_dsi_pll_28nm_vco,
 	};
 	struct device *dev = &pll_28nm->pdev->dev;

+ 1 - 0
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c

@@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
 	struct clk_init_data vco_init = {
 		.parent_names = (const char *[]){ "pxo" },
 		.num_parents = 1,
+		.flags = CLK_IGNORE_UNUSED,
 		.ops = &clk_ops_dsi_pll_28nm_vco,
 	};
 	struct device *dev = &pll_28nm->pdev->dev;

+ 1 - 0
drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c

@@ -702,6 +702,7 @@ static struct clk_init_data pll_init = {
 	.ops = &hdmi_8996_pll_ops,
 	.parent_names = hdmi_pll_parents,
 	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+	.flags = CLK_IGNORE_UNUSED,
 };

 int msm_hdmi_pll_8996_init(struct platform_device *pdev)

+ 1 - 0
drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c

@@ -424,6 +424,7 @@ static struct clk_init_data pll_init = {
 	.ops = &hdmi_pll_ops,
 	.parent_names = hdmi_pll_parents,
 	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+	.flags = CLK_IGNORE_UNUSED,
 };

 int msm_hdmi_pll_8960_init(struct platform_device *pdev)

+ 2 - 2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c

@@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
 		.count = 2,
 		.base = { 0x14000, 0x16000 },
 		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
-				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 1,
@@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
 	.lm = {
 		.count = 2, /* LM0 and LM3 */
 		.base = { 0x44000, 0x47000 },
-		.nb_stages = 5,
+		.nb_stages = 8,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
 	},

+ 28 - 18
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c

@@ -223,12 +223,7 @@ static void blend_setup(struct drm_crtc *crtc)
 		plane_cnt++;
 	}
 
-	/*
-	* If there is no base layer, enable border color.
-	* Although it's not possbile in current blend logic,
-	* put it here as a reminder.
-	*/
-	if (!pstates[STAGE_BASE] && plane_cnt) {
+	if (!pstates[STAGE_BASE]) {
 		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
 		DBG("Border Color is enabled");
 	}
@@ -365,6 +360,15 @@ static int pstate_cmp(const void *a, const void *b)
 	return pa->state->zpos - pb->state->zpos;
 }
 
+/* is there a helper for this? */
+static bool is_fullscreen(struct drm_crtc_state *cstate,
+		struct drm_plane_state *pstate)
+{
+	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
+		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
+		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
+}
+
 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -375,21 +379,11 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	struct plane_state pstates[STAGE_MAX + 1];
 	const struct mdp5_cfg_hw *hw_cfg;
 	const struct drm_plane_state *pstate;
-	int cnt = 0, i;
+	int cnt = 0, base = 0, i;
 
 	DBG("%s: check", mdp5_crtc->name);
 
-	/* verify that there are not too many planes attached to crtc
-	 * and that we don't have conflicting mixer stages:
-	 */
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
-		if (cnt >= (hw_cfg->lm.nb_stages)) {
-			dev_err(dev->dev, "too many planes!\n");
-			return -EINVAL;
-		}
-
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
@@ -399,8 +393,24 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	/* assign a stage based on sorted zpos property */
 	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
+	/* if the bottom-most layer is not fullscreen, we need to use
+	 * it for solid-color:
+	 */
+	if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
+		base++;
+
+	/* verify that there are not too many planes attached to crtc
+	 * and that we don't have conflicting mixer stages:
+	 */
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+	if ((cnt + base) >= hw_cfg->lm.nb_stages) {
+		dev_err(dev->dev, "too many planes!\n");
+		return -EINVAL;
+	}
+
 	for (i = 0; i < cnt; i++) {
-		pstates[i].state->stage = STAGE_BASE + i;
+		pstates[i].state->stage = STAGE_BASE + i + base;
 		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
 				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
 				pstates[i].state->stage);

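Worked example of the staging logic above, using the msm8x16 config just bumped to nb_stages = 8: suppose three planes are attached and the bottom-most (lowest zpos) plane is not fullscreen. Then base = 1, the capacity check (cnt + base) >= nb_stages compares 4 against 8 and passes, and the loop assigns stages STAGE_BASE + 1, + 2, + 3. STAGE_BASE stays empty, so blend_setup() (first hunk) sees no pstates[STAGE_BASE] and sets MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT, letting the mixer's border color fill the region the bottom plane does not cover.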
+ 3 - 6
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c

@@ -292,8 +292,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		format = to_mdp_format(msm_framebuffer_format(state->fb));
 		if (MDP_FORMAT_IS_YUV(format) &&
 			!pipe_supports_yuv(mdp5_plane->caps)) {
-			dev_err(plane->dev->dev,
-				"Pipe doesn't support YUV\n");
+			DBG("Pipe doesn't support YUV\n");
 
 			return -EINVAL;
 		}
@@ -301,8 +300,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
 			(((state->src_w >> 16) != state->crtc_w) ||
 			((state->src_h >> 16) != state->crtc_h))) {
-			dev_err(plane->dev->dev,
-				"Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+			DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
 				state->src_w >> 16, state->src_h >> 16,
 				state->crtc_w, state->crtc_h);
 
@@ -313,8 +311,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		vflip = !!(state->rotation & DRM_REFLECT_Y);
 		if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
 			(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
-			dev_err(plane->dev->dev,
-				"Pipe doesn't support flip\n");
+			DBG("Pipe doesn't support flip\n");
 
 			return -EINVAL;
 		}

+ 1 - 1
drivers/gpu/drm/msm/msm_drv.c

@@ -228,7 +228,7 @@ static int msm_drm_uninit(struct device *dev)
 	flush_workqueue(priv->atomic_wq);
 	destroy_workqueue(priv->atomic_wq);
 
-	if (kms)
+	if (kms && kms->funcs)
 		kms->funcs->destroy(kms);
 
 	if (gpu) {

+ 5 - 2
drivers/gpu/drm/msm/msm_gem_shrinker.c

@@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
 void msm_gem_shrinker_cleanup(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
-	unregister_shrinker(&priv->shrinker);
+
+	if (priv->shrinker.nr_deferred) {
+		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+		unregister_shrinker(&priv->shrinker);
+	}
 }

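The guard above makes cleanup tolerate init failures that happen before msm_gem_shrinker_init() ran: register_shrinker() is what allocates shrinker.nr_deferred, so a NULL value there is a reliable "never registered" marker, and unconditionally calling unregister_shrinker() and unregister_vmap_purge_notifier() in that state would unlink objects that were never set up.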
+ 1 - 1
drivers/gpu/drm/radeon/radeon_connectors.c

@@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
-	if (radeon_connector->ddc_bus->has_aux) {
+	if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) {
 		drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
 		radeon_connector->ddc_bus->has_aux = false;
 	}

+ 13 - 0
drivers/gpu/drm/radeon/radeon_device.c

@@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = {
 	"LAST",
 };
 
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
@@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
 
 	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
 		rdev->flags &= ~RADEON_IS_PX;
+
+	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
+	if (!radeon_is_atpx_hybrid() &&
+	    !radeon_has_atpx_dgpu_power_cntl())
+		rdev->flags &= ~RADEON_IS_PX;
 }
 
 /**

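The #if block in the first radeon_device.c hunk is the usual kernel stub pattern: real prototypes when CONFIG_VGA_SWITCHEROO is set (the ATPX definitions are only compiled in that case), static inline stubs returning false otherwise, so radeon_device_handle_px_quirks() needs no #ifdef of its own. Generic form of the pattern (sketch, hypothetical names):

	#if defined(CONFIG_MY_FEATURE)
	bool my_feature_available(void);	/* defined in the optional object */
	#else
	static inline bool my_feature_available(void) { return false; }
	#endif

	/* callers just write: if (my_feature_available()) ...; the compiler
	 * folds the constant false away when the option is off */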
Some files were not shown because too many files changed in this diff