瀏覽代碼

Merge tag 'asoc-fix-v4.3-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v4.3

A bunch of driver fixes plus one core fix which fixes problems with
misreporting values from _SX controls following a recent refactoring.
This had gone unnoticed as such controls are quite rare.
Takashi Iwai 9 年之前
父節點
當前提交
b9b6e4ac2d
共有 100 個文件被更改，包括 584 次插入和 320 次刪除
  1. 7 3
      Documentation/device-mapper/snapshot.txt
  2. 1 1
      Documentation/devicetree/bindings/spi/sh-msiof.txt
  3. 1 0
      Documentation/devicetree/bindings/usb/renesas_usbhs.txt
  4. 26 3
      MAINTAINERS
  5. 2 2
      Makefile
  6. 2 0
      arch/alpha/include/asm/word-at-a-time.h
  7. 1 1
      arch/arm/boot/dts/Makefile
  8. 1 0
      arch/arm/boot/dts/exynos4412.dtsi
  9. 1 0
      arch/arm/boot/dts/exynos5250-smdk5250.dts
  10. 1 1
      arch/arm/boot/dts/exynos5420.dtsi
  11. 0 1
      arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
  12. 1 1
      arch/arm/boot/dts/imx53-qsrb.dts
  13. 1 0
      arch/arm/boot/dts/imx53.dtsi
  14. 0 2
      arch/arm/boot/dts/imx6qdl-rex.dtsi
  15. 1 0
      arch/arm/boot/dts/r8a7790.dtsi
  16. 1 0
      arch/arm/boot/dts/r8a7791.dtsi
  17. 1 1
      arch/arm/boot/dts/sun7i-a20.dtsi
  18. 26 1
      arch/arm/mach-exynos/mcpm-exynos.c
  19. 6 0
      arch/arm/mach-exynos/regs-pmu.h
  20. 1 1
      arch/arm64/Makefile
  21. 1 1
      arch/arm64/include/asm/unistd.h
  22. 9 0
      arch/arm64/include/asm/unistd32.h
  23. 3 0
      arch/arm64/include/uapi/asm/signal.h
  24. 12 11
      arch/arm64/kernel/debug-monitors.c
  25. 3 3
      arch/arm64/kernel/insn.c
  26. 2 0
      arch/arm64/kernel/setup.c
  27. 1 0
      arch/arm64/mm/fault.c
  28. 1 0
      arch/h8300/include/asm/Kbuild
  29. 1 0
      arch/mips/include/asm/io.h
  30. 7 12
      arch/mips/include/uapi/asm/swab.h
  31. 1 1
      arch/powerpc/configs/ppc64_defconfig
  32. 1 1
      arch/powerpc/configs/pseries_defconfig
  33. 0 1
      arch/powerpc/include/asm/Kbuild
  34. 7 2
      arch/powerpc/include/asm/machdep.h
  35. 5 0
      arch/powerpc/include/asm/word-at-a-time.h
  36. 11 12
      arch/powerpc/mm/hash_native_64.c
  37. 5 2
      arch/powerpc/platforms/powernv/opal.c
  38. 0 5
      arch/powerpc/platforms/ps3/os-area.c
  39. 1 1
      arch/s390/boot/compressed/Makefile
  40. 1 1
      arch/s390/configs/default_defconfig
  41. 1 1
      arch/s390/configs/gcov_defconfig
  42. 1 1
      arch/s390/configs/performance_defconfig
  43. 1 1
      arch/s390/include/asm/numa.h
  44. 1 1
      arch/s390/include/asm/topology.h
  45. 1 0
      arch/s390/kernel/asm-offsets.c
  46. 29 1
      arch/s390/kernel/entry.S
  47. 37 29
      arch/s390/kernel/vtime.c
  48. 2 2
      arch/s390/numa/mode_emu.c
  49. 2 2
      arch/s390/numa/numa.c
  50. 1 0
      arch/sh/include/asm/page.h
  51. 2 0
      arch/sparc/crypto/aes_glue.c
  52. 1 0
      arch/sparc/crypto/camellia_glue.c
  53. 2 0
      arch/sparc/crypto/des_glue.c
  54. 0 1
      arch/tile/include/asm/Kbuild
  55. 7 1
      arch/tile/include/asm/word-at-a-time.h
  56. 1 0
      arch/x86/Kconfig
  57. 5 0
      arch/x86/crypto/camellia_aesni_avx_glue.c
  58. 2 4
      arch/x86/include/asm/kvm_host.h
  59. 2 2
      arch/x86/include/asm/xen/hypercall.h
  60. 7 3
      arch/x86/kvm/emulate.c
  61. 6 20
      arch/x86/kvm/vmx.c
  62. 75 60
      arch/x86/kvm/x86.c
  63. 24 0
      arch/x86/xen/enlighten.c
  64. 18 1
      arch/x86/xen/p2m.c
  65. 2 2
      arch/x86/xen/setup.c
  66. 2 1
      crypto/ahash.c
  67. 1 0
      drivers/acpi/acpica/acglobal.h
  68. 1 3
      drivers/acpi/acpica/actables.h
  69. 1 1
      drivers/acpi/acpica/evxfevnt.c
  70. 5 5
      drivers/acpi/acpica/tbfadt.c
  71. 2 24
      drivers/acpi/acpica/tbutils.c
  72. 6 16
      drivers/base/power/domain_governor.c
  73. 2 3
      drivers/base/regmap/regmap-debugfs.c
  74. 8 2
      drivers/block/rbd.c
  75. 0 1
      drivers/bus/Kconfig
  76. 3 1
      drivers/clk/mvebu/clk-cpu.c
  77. 5 5
      drivers/clk/samsung/clk-cpu.c
  78. 1 1
      drivers/clk/ti/clk-3xxx.c
  79. 1 17
      drivers/clk/ti/clk-7xx.c
  80. 2 2
      drivers/clk/ti/clkt_dflt.c
  81. 3 0
      drivers/cpufreq/acpi-cpufreq.c
  82. 3 1
      drivers/cpufreq/cpufreq.c
  83. 5 0
      drivers/cpufreq/intel_pstate.c
  84. 4 3
      drivers/devfreq/devfreq.c
  85. 6 2
      drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
  86. 3 3
      drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
  87. 0 2
      drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
  88. 5 5
      drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
  89. 16 0
      drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
  90. 4 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
  91. 1 0
      drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
  92. 3 1
      drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
  93. 5 3
      drivers/gpu/drm/amd/amdgpu/ci_dpm.c
  94. 3 0
      drivers/gpu/drm/amd/amdgpu/cik.c
  95. 6 4
      drivers/gpu/drm/amd/amdgpu/cz_dpm.c
  96. 28 2
      drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
  97. 29 3
      drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
  98. 28 2
      drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
  99. 6 3
      drivers/gpu/drm/amd/amdgpu/kv_dpm.c
  100. 3 0
      drivers/gpu/drm/amd/amdgpu/vi.c

+ 7 - 3
Documentation/device-mapper/snapshot.txt

@@ -41,9 +41,13 @@ useless and be disabled, returning errors.  So it is important to monitor
 the amount of free space and expand the <COW device> before it fills up.
 the amount of free space and expand the <COW device> before it fills up.
 
 
 <persistent?> is P (Persistent) or N (Not persistent - will not survive
 <persistent?> is P (Persistent) or N (Not persistent - will not survive
-after reboot).
-The difference is that for transient snapshots less metadata must be
-saved on disk - they can be kept in memory by the kernel.
+after reboot).  O (Overflow) can be added as a persistent store option
+to allow userspace to advertise its support for seeing "Overflow" in the
+snapshot status.  So supported store types are "P", "PO" and "N".
+
+The difference between persistent and transient is with transient
+snapshots less metadata must be saved on disk - they can be kept in
+memory by the kernel.
 
 
 
 
 * snapshot-merge <origin> <COW device> <persistent> <chunksize>
 * snapshot-merge <origin> <COW device> <persistent> <chunksize>

+ 1 - 1
Documentation/devicetree/bindings/spi/sh-msiof.txt

@@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings:
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
 			 (default is 64)
 			 (default is 64)
 - renesas,rx-fifo-size : Overrides the default rx fifo size given in words
 - renesas,rx-fifo-size : Overrides the default rx fifo size given in words
-			 (default is 64, or 256 on R-Car Gen2)
+			 (default is 64)
 
 
 Pinctrl properties might be needed, too.  See
 Pinctrl properties might be needed, too.  See
 Documentation/devicetree/bindings/pinctrl/renesas,*.
 Documentation/devicetree/bindings/pinctrl/renesas,*.

+ 1 - 0
Documentation/devicetree/bindings/usb/renesas_usbhs.txt

@@ -5,6 +5,7 @@ Required properties:
 	- "renesas,usbhs-r8a7790"
 	- "renesas,usbhs-r8a7790"
 	- "renesas,usbhs-r8a7791"
 	- "renesas,usbhs-r8a7791"
 	- "renesas,usbhs-r8a7794"
 	- "renesas,usbhs-r8a7794"
+	- "renesas,usbhs-r8a7795"
   - reg: Base address and length of the register for the USBHS
   - reg: Base address and length of the register for the USBHS
   - interrupts: Interrupt specifier for the USBHS
   - interrupts: Interrupt specifier for the USBHS
   - clocks: A list of phandle + clock specifier pairs
   - clocks: A list of phandle + clock specifier pairs

+ 26 - 3
MAINTAINERS

@@ -3591,6 +3591,13 @@ F:	drivers/gpu/drm/i915/
 F:	include/drm/i915*
 F:	include/drm/i915*
 F:	include/uapi/drm/i915*
 F:	include/uapi/drm/i915*
 
 
+DRM DRIVERS FOR ATMEL HLCDC
+M:	Boris Brezillon <boris.brezillon@free-electrons.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/atmel-hlcdc/
+F:	Documentation/devicetree/bindings/drm/atmel/
+
 DRM DRIVERS FOR EXYNOS
 DRM DRIVERS FOR EXYNOS
 M:	Inki Dae <inki.dae@samsung.com>
 M:	Inki Dae <inki.dae@samsung.com>
 M:	Joonyoung Shim <jy0922.shim@samsung.com>
 M:	Joonyoung Shim <jy0922.shim@samsung.com>
@@ -3619,6 +3626,14 @@ S:	Maintained
 F:	drivers/gpu/drm/imx/
 F:	drivers/gpu/drm/imx/
 F:	Documentation/devicetree/bindings/drm/imx/
 F:	Documentation/devicetree/bindings/drm/imx/
 
 
+DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
+M:	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+L:	dri-devel@lists.freedesktop.org
+T:	git git://github.com/patjak/drm-gma500
+S:	Maintained
+F:	drivers/gpu/drm/gma500
+F:	include/drm/gma500*
+
 DRM DRIVERS FOR NVIDIA TEGRA
 DRM DRIVERS FOR NVIDIA TEGRA
 M:	Thierry Reding <thierry.reding@gmail.com>
 M:	Thierry Reding <thierry.reding@gmail.com>
 M:	Terje Bergström <tbergstrom@nvidia.com>
 M:	Terje Bergström <tbergstrom@nvidia.com>
@@ -4003,7 +4018,7 @@ S:	Maintained
 F:	sound/usb/misc/ua101.c
 F:	sound/usb/misc/ua101.c
 
 
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M:	Matt Fleming <matt.fleming@intel.com>
+M:	Matt Fleming <matt@codeblueprint.co.uk>
 L:	linux-efi@vger.kernel.org
 L:	linux-efi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 S:	Maintained
 S:	Maintained
@@ -4018,7 +4033,7 @@ F:	include/linux/efi*.h
 EFI VARIABLE FILESYSTEM
 EFI VARIABLE FILESYSTEM
 M:	Matthew Garrett <matthew.garrett@nebula.com>
 M:	Matthew Garrett <matthew.garrett@nebula.com>
 M:	Jeremy Kerr <jk@ozlabs.org>
 M:	Jeremy Kerr <jk@ozlabs.org>
-M:	Matt Fleming <matt.fleming@intel.com>
+M:	Matt Fleming <matt@codeblueprint.co.uk>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 L:	linux-efi@vger.kernel.org
 L:	linux-efi@vger.kernel.org
 S:	Maintained
 S:	Maintained
@@ -9101,6 +9116,15 @@ S: Supported
 F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
 F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
 F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
 F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
 
 
+SYNOPSYS DESIGNWARE I2C DRIVER
+M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+M:	Jarkko Nikula <jarkko.nikula@linux.intel.com>
+M:	Mika Westerberg <mika.westerberg@linux.intel.com>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	drivers/i2c/busses/i2c-designware-*
+F:	include/linux/platform_data/i2c-designware.h
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:	Seungwon Jeon <tgih.jun@samsung.com>
 M:	Seungwon Jeon <tgih.jun@samsung.com>
 M:	Jaehoon Chung <jh80.chung@samsung.com>
 M:	Jaehoon Chung <jh80.chung@samsung.com>
@@ -9914,7 +9938,6 @@ S:	Maintained
 F:	drivers/staging/lustre
 F:	drivers/staging/lustre
 
 
 STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
 STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
-M:	Julian Andres Klode <jak@jak-linux.org>
 M:	Marc Dietrich <marvin24@gmx.de>
 M:	Marc Dietrich <marvin24@gmx.de>
 L:	ac100@lists.launchpad.net (moderated for non-subscribers)
 L:	ac100@lists.launchpad.net (moderated for non-subscribers)
 L:	linux-tegra@vger.kernel.org
 L:	linux-tegra@vger.kernel.org

+ 2 - 2
Makefile

@@ -1,8 +1,8 @@
 VERSION = 4
 VERSION = 4
 PATCHLEVEL = 3
 PATCHLEVEL = 3
 SUBLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
-NAME = Hurr durr I'ma sheep
+EXTRAVERSION = -rc6
+NAME = Blurry Fish Butt
 
 
 # *DOCUMENTATION*
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
 # To see a list of typical targets execute "make help"

+ 2 - 0
arch/alpha/include/asm/word-at-a-time.h

@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
 #endif
 #endif
 }
 }
 
 
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
 #endif /* _ASM_WORD_AT_A_TIME_H */

+ 1 - 1
arch/arm/boot/dts/Makefile

@@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \
 	sun4i-a10-hackberry.dtb \
 	sun4i-a10-hackberry.dtb \
 	sun4i-a10-hyundai-a7hd.dtb \
 	sun4i-a10-hyundai-a7hd.dtb \
 	sun4i-a10-inet97fv2.dtb \
 	sun4i-a10-inet97fv2.dtb \
-	sun4i-a10-itead-iteaduino-plus.dts \
+	sun4i-a10-itead-iteaduino-plus.dtb \
 	sun4i-a10-jesurun-q5.dtb \
 	sun4i-a10-jesurun-q5.dtb \
 	sun4i-a10-marsboard.dtb \
 	sun4i-a10-marsboard.dtb \
 	sun4i-a10-mini-xplus.dtb \
 	sun4i-a10-mini-xplus.dtb \

+ 1 - 0
arch/arm/boot/dts/exynos4412.dtsi

@@ -98,6 +98,7 @@
 			opp-hz = /bits/ 64 <800000000>;
 			opp-hz = /bits/ 64 <800000000>;
 			opp-microvolt = <1000000>;
 			opp-microvolt = <1000000>;
 			clock-latency-ns = <200000>;
 			clock-latency-ns = <200000>;
+			opp-suspend;
 		};
 		};
 		opp07 {
 		opp07 {
 			opp-hz = /bits/ 64 <900000000>;
 			opp-hz = /bits/ 64 <900000000>;

+ 1 - 0
arch/arm/boot/dts/exynos5250-smdk5250.dts

@@ -197,6 +197,7 @@
 				regulator-name = "P1.8V_LDO_OUT10";
 				regulator-name = "P1.8V_LDO_OUT10";
 				regulator-min-microvolt = <1800000>;
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
 			};
 			};
 
 
 			ldo11_reg: LDO11 {
 			ldo11_reg: LDO11 {

+ 1 - 1
arch/arm/boot/dts/exynos5420.dtsi

@@ -1117,7 +1117,7 @@
 		interrupt-parent = <&combiner>;
 		interrupt-parent = <&combiner>;
 		interrupts = <3 0>;
 		interrupts = <3 0>;
 		clock-names = "sysmmu", "master";
 		clock-names = "sysmmu", "master";
-		clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
+		clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
 		power-domains = <&disp_pd>;
 		power-domains = <&disp_pd>;
 		#iommu-cells = <0>;
 		#iommu-cells = <0>;
 	};
 	};

+ 0 - 1
arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi

@@ -472,7 +472,6 @@
 	 */
 	 */
 	pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
 	pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
 	pinctrl-names = "default";
 	pinctrl-names = "default";
-	samsung,pwm-outputs = <0>;
 	status = "okay";
 	status = "okay";
 };
 };
 
 

+ 1 - 1
arch/arm/boot/dts/imx53-qsrb.dts

@@ -36,7 +36,7 @@
 		pinctrl-0 = <&pinctrl_pmic>;
 		pinctrl-0 = <&pinctrl_pmic>;
 		reg = <0x08>;
 		reg = <0x08>;
 		interrupt-parent = <&gpio5>;
 		interrupt-parent = <&gpio5>;
-		interrupts = <23 0x8>;
+		interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
 		regulators {
 		regulators {
 			sw1_reg: sw1a {
 			sw1_reg: sw1a {
 				regulator-name = "SW1";
 				regulator-name = "SW1";

+ 1 - 0
arch/arm/boot/dts/imx53.dtsi

@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/imx5-clock.h>
 #include <dt-bindings/clock/imx5-clock.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 
 / {
 / {
 	aliases {
 	aliases {

+ 0 - 2
arch/arm/boot/dts/imx6qdl-rex.dtsi

@@ -35,7 +35,6 @@
 			compatible = "regulator-fixed";
 			compatible = "regulator-fixed";
 			reg = <1>;
 			reg = <1>;
 			pinctrl-names = "default";
 			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usbh1>;
 			regulator-name = "usbh1_vbus";
 			regulator-name = "usbh1_vbus";
 			regulator-min-microvolt = <5000000>;
 			regulator-min-microvolt = <5000000>;
 			regulator-max-microvolt = <5000000>;
 			regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
 			compatible = "regulator-fixed";
 			compatible = "regulator-fixed";
 			reg = <2>;
 			reg = <2>;
 			pinctrl-names = "default";
 			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usbotg>;
 			regulator-name = "usb_otg_vbus";
 			regulator-name = "usb_otg_vbus";
 			regulator-min-microvolt = <5000000>;
 			regulator-min-microvolt = <5000000>;
 			regulator-max-microvolt = <5000000>;
 			regulator-max-microvolt = <5000000>;

+ 1 - 0
arch/arm/boot/dts/r8a7790.dtsi

@@ -1627,6 +1627,7 @@
 				"mix.0", "mix.1",
 				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
 				"clk_a", "clk_b", "clk_c", "clk_i";
+		power-domains = <&cpg_clocks>;
 
 
 		status = "disabled";
 		status = "disabled";
 
 

+ 1 - 0
arch/arm/boot/dts/r8a7791.dtsi

@@ -1677,6 +1677,7 @@
 				"mix.0", "mix.1",
 				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
 				"clk_a", "clk_b", "clk_c", "clk_i";
+		power-domains = <&cpg_clocks>;
 
 
 		status = "disabled";
 		status = "disabled";
 
 

+ 1 - 1
arch/arm/boot/dts/sun7i-a20.dtsi

@@ -107,7 +107,7 @@
 				720000	1200000
 				720000	1200000
 				528000	1100000
 				528000	1100000
 				312000	1000000
 				312000	1000000
-				144000	900000
+				144000	1000000
 				>;
 				>;
 			#cooling-cells = <2>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
 			cooling-min-level = <0>;

+ 26 - 1
arch/arm/mach-exynos/mcpm-exynos.c

@@ -20,6 +20,7 @@
 #include <asm/cputype.h>
 #include <asm/cputype.h>
 #include <asm/cp15.h>
 #include <asm/cp15.h>
 #include <asm/mcpm.h>
 #include <asm/mcpm.h>
+#include <asm/smp_plat.h>
 
 
 #include "regs-pmu.h"
 #include "regs-pmu.h"
 #include "common.h"
 #include "common.h"
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
 		cluster >= EXYNOS5420_NR_CLUSTERS)
 		cluster >= EXYNOS5420_NR_CLUSTERS)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	exynos_cpu_power_up(cpunr);
+	if (!exynos_cpu_power_state(cpunr)) {
+		exynos_cpu_power_up(cpunr);
+
+		/*
+		 * This assumes the cluster number of the big cores(Cortex A15)
+		 * is 0 and the Little cores(Cortex A7) is 1.
+		 * When the system was booted from the Little core,
+		 * they should be reset during power up cpu.
+		 */
+		if (cluster &&
+		    cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+			/*
+			 * Before we reset the Little cores, we should wait
+			 * the SPARE2 register is set to 1 because the init
+			 * codes of the iROM will set the register after
+			 * initialization.
+			 */
+			while (!pmu_raw_readl(S5P_PMU_SPARE2))
+				udelay(10);
+
+			pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+					EXYNOS_SWRESET);
+		}
+	}
+
 	return 0;
 	return 0;
 }
 }
 
 

+ 6 - 0
arch/arm/mach-exynos/regs-pmu.h

@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
 #define SPREAD_ENABLE						0xF
 #define SPREAD_ENABLE						0xF
 #define SPREAD_USE_STANDWFI					0xF
 #define SPREAD_USE_STANDWFI					0xF
 
 
+#define EXYNOS5420_KFC_CORE_RESET0				BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0				BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr)				\
+	((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
 #define EXYNOS5420_BB_CON1					0x0784
 #define EXYNOS5420_BB_CON1					0x0784
 #define EXYNOS5420_BB_SEL_EN					BIT(31)
 #define EXYNOS5420_BB_SEL_EN					BIT(31)
 #define EXYNOS5420_BB_PMOS_EN					BIT(7)
 #define EXYNOS5420_BB_PMOS_EN					BIT(7)

+ 1 - 1
arch/arm64/Makefile

@@ -42,7 +42,7 @@ endif
 CHECKFLAGS	+= -D__aarch64__
 CHECKFLAGS	+= -D__aarch64__
 
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
 ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
-CFLAGS_MODULE	+= -mcmodel=large
+KBUILD_CFLAGS_MODULE	+= -mcmodel=large
 endif
 endif
 
 
 # Default value
 # Default value

+ 1 - 1
arch/arm64/include/asm/unistd.h

@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
 
 
-#define __NR_compat_syscalls		388
+#define __NR_compat_syscalls		390
 #endif
 #endif
 
 
 #define __ARCH_WANT_SYS_CLONE
 #define __ARCH_WANT_SYS_CLONE

+ 9 - 0
arch/arm64/include/asm/unistd32.h

@@ -797,3 +797,12 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
 __SYSCALL(__NR_bpf, sys_bpf)
 __SYSCALL(__NR_bpf, sys_bpf)
 #define __NR_execveat 387
 #define __NR_execveat 387
 __SYSCALL(__NR_execveat, compat_sys_execveat)
 __SYSCALL(__NR_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 388
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 389
+__SYSCALL(__NR_membarrier, sys_membarrier)
+
+/*
+ * Please add new compat syscalls above this comment and update
+ * __NR_compat_syscalls in asm/unistd.h.
+ */

+ 3 - 0
arch/arm64/include/uapi/asm/signal.h

@@ -19,6 +19,9 @@
 /* Required for AArch32 compatibility. */
 /* Required for AArch32 compatibility. */
 #define SA_RESTORER	0x04000000
 #define SA_RESTORER	0x04000000
 
 
+#define MINSIGSTKSZ 5120
+#define SIGSTKSZ    16384
+
 #include <asm-generic/signal.h>
 #include <asm-generic/signal.h>
 
 
 #endif
 #endif

+ 12 - 11
arch/arm64/kernel/debug-monitors.c

@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
 }
 }
 
 
 /*
 /*
- * Call registered single step handers
+ * Call registered single step handlers
  * There is no Syndrome info to check for determining the handler.
  * There is no Syndrome info to check for determining the handler.
  * So we call all the registered handlers, until the right handler is
  * So we call all the registered handlers, until the right handler is
  * found which returns zero.
  * found which returns zero.
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
  * Use reader/writer locks instead of plain spinlock.
  * Use reader/writer locks instead of plain spinlock.
  */
  */
 static LIST_HEAD(break_hook);
 static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
 
 
 void register_break_hook(struct break_hook *hook)
 void register_break_hook(struct break_hook *hook)
 {
 {
-	write_lock(&break_hook_lock);
-	list_add(&hook->node, &break_hook);
-	write_unlock(&break_hook_lock);
+	spin_lock(&break_hook_lock);
+	list_add_rcu(&hook->node, &break_hook);
+	spin_unlock(&break_hook_lock);
 }
 }
 
 
 void unregister_break_hook(struct break_hook *hook)
 void unregister_break_hook(struct break_hook *hook)
 {
 {
-	write_lock(&break_hook_lock);
-	list_del(&hook->node);
-	write_unlock(&break_hook_lock);
+	spin_lock(&break_hook_lock);
+	list_del_rcu(&hook->node);
+	spin_unlock(&break_hook_lock);
+	synchronize_rcu();
 }
 }
 
 
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
 	struct break_hook *hook;
 	struct break_hook *hook;
 	int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
 	int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
 
 
-	read_lock(&break_hook_lock);
-	list_for_each_entry(hook, &break_hook, node)
+	rcu_read_lock();
+	list_for_each_entry_rcu(hook, &break_hook, node)
 		if ((esr & hook->esr_mask) == hook->esr_val)
 		if ((esr & hook->esr_mask) == hook->esr_val)
 			fn = hook->fn;
 			fn = hook->fn;
-	read_unlock(&break_hook_lock);
+	rcu_read_unlock();
 
 
 	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
 }

+ 3 - 3
arch/arm64/kernel/insn.c

@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
 		aarch64_insn_is_bcond(insn));
 		aarch64_insn_is_bcond(insn));
 }
 }
 
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 
 static void __kprobes *patch_map(void *addr, int fixmap)
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
 {
@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
 	unsigned long flags = 0;
 	unsigned long flags = 0;
 	int ret;
 	int ret;
 
 
-	spin_lock_irqsave(&patch_lock, flags);
+	raw_spin_lock_irqsave(&patch_lock, flags);
 	waddr = patch_map(addr, FIX_TEXT_POKE0);
 	waddr = patch_map(addr, FIX_TEXT_POKE0);
 
 
 	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
 	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
 
 
 	patch_unmap(FIX_TEXT_POKE0);
 	patch_unmap(FIX_TEXT_POKE0);
-	spin_unlock_irqrestore(&patch_lock, flags);
+	raw_spin_unlock_irqrestore(&patch_lock, flags);
 
 
 	return ret;
 	return ret;
 }
 }

+ 2 - 0
arch/arm64/kernel/setup.c

@@ -364,6 +364,8 @@ static void __init relocate_initrd(void)
 		to_free = ram_end - orig_start;
 		to_free = ram_end - orig_start;
 
 
 	size = orig_end - orig_start;
 	size = orig_end - orig_start;
+	if (!size)
+		return;
 
 
 	/* initrd needs to be relocated completely inside linear mapping */
 	/* initrd needs to be relocated completely inside linear mapping */
 	new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
 	new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),

+ 1 - 0
arch/arm64/mm/fault.c

@@ -287,6 +287,7 @@ retry:
 			 * starvation.
 			 * starvation.
 			 */
 			 */
 			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			mm_flags |= FAULT_FLAG_TRIED;
 			goto retry;
 			goto retry;
 		}
 		}
 	}
 	}

+ 1 - 0
arch/h8300/include/asm/Kbuild

@@ -73,4 +73,5 @@ generic-y += uaccess.h
 generic-y += ucontext.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += unaligned.h
 generic-y += vga.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
 generic-y += xor.h

+ 1 - 0
arch/mips/include/asm/io.h

@@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
  */
  */
 #define ioremap_nocache(offset, size)					\
 #define ioremap_nocache(offset, size)					\
 	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
 	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
+#define ioremap_uc ioremap_nocache
 
 
 /*
 /*
  * ioremap_cachable -	map bus memory into CPU space
  * ioremap_cachable -	map bus memory into CPU space

+ 7 - 12
arch/mips/include/uapi/asm/swab.h

@@ -13,16 +13,15 @@
 
 
 #define __SWAB_64_THRU_32__
 #define __SWAB_64_THRU_32__
 
 
-#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||		\
-    defined(_MIPS_ARCH_LOONGSON3A)
+#if !defined(__mips16) &&					\
+	((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||	\
+	 defined(_MIPS_ARCH_LOONGSON3A))
 
 
-static inline __attribute__((nomips16)) __attribute_const__
-		__u16 __arch_swab16(__u16 x)
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
 {
 {
 	__asm__(
 	__asm__(
 	"	.set	push			\n"
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
 	"	.set	arch=mips32r2		\n"
-	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	wsbh	%0, %1			\n"
 	"	.set	pop			\n"
 	"	.set	pop			\n"
 	: "=r" (x)
 	: "=r" (x)
@@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__
 }
 }
 #define __arch_swab16 __arch_swab16
 #define __arch_swab16 __arch_swab16
 
 
-static inline __attribute__((nomips16)) __attribute_const__
-		__u32 __arch_swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
 {
 	__asm__(
 	__asm__(
 	"	.set	push			\n"
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
 	"	.set	arch=mips32r2		\n"
-	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	wsbh	%0, %1			\n"
 	"	rotr	%0, %0, 16		\n"
 	"	rotr	%0, %0, 16		\n"
 	"	.set	pop			\n"
 	"	.set	pop			\n"
@@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__
  * 64-bit kernel on r2 CPUs.
  * 64-bit kernel on r2 CPUs.
  */
  */
 #ifdef __mips64
 #ifdef __mips64
-static inline __attribute__((nomips16)) __attribute_const__
-		__u64 __arch_swab64(__u64 x)
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 {
 {
 	__asm__(
 	__asm__(
 	"	.set	push			\n"
 	"	.set	push			\n"
 	"	.set	arch=mips64r2		\n"
 	"	.set	arch=mips64r2		\n"
-	"	.set	nomips16		\n"
 	"	dsbh	%0, %1			\n"
 	"	dsbh	%0, %1			\n"
 	"	dshd	%0, %0			\n"
 	"	dshd	%0, %0			\n"
 	"	.set	pop			\n"
 	"	.set	pop			\n"
@@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__
 }
 }
 #define __arch_swab64 __arch_swab64
 #define __arch_swab64 __arch_swab64
 #endif /* __mips64 */
 #endif /* __mips64 */
-#endif /* MIPS R2 or newer or Loongson 3A */
+#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */
 #endif /* _ASM_SWAB_H */
 #endif /* _ASM_SWAB_H */

+ 1 - 1
arch/powerpc/configs/ppc64_defconfig

@@ -111,7 +111,7 @@ CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y
 CONFIG_ATA=y

+ 1 - 1
arch/powerpc/configs/pseries_defconfig

@@ -114,7 +114,7 @@ CONFIG_SCSI_QLA_FC=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y
 CONFIG_ATA=y

+ 0 - 1
arch/powerpc/include/asm/Kbuild

@@ -7,4 +7,3 @@ generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += rwsem.h
 generic-y += vtime.h
 generic-y += vtime.h
-generic-y += word-at-a-time.h

+ 7 - 2
arch/powerpc/include/asm/machdep.h

@@ -61,8 +61,13 @@ struct machdep_calls {
 					       unsigned long addr,
 					       unsigned long addr,
 					       unsigned char *hpte_slot_array,
 					       unsigned char *hpte_slot_array,
 					       int psize, int ssize, int local);
 					       int psize, int ssize, int local);
-	/* special for kexec, to be called in real mode, linear mapping is
-	 * destroyed as well */
+	/*
+	 * Special for kexec.
+	 * To be called in real mode with interrupts disabled. No locks are
+	 * taken as such, concurrent access on pre POWER5 hardware could result
+	 * in a deadlock.
+	 * The linear mapping is destroyed as well.
+	 */
 	void		(*hpte_clear_all)(void);
 	void		(*hpte_clear_all)(void);
 
 
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,

+ 5 - 0
arch/powerpc/include/asm/word-at-a-time.h

@@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 	return (val + c->high_bits) & ~rhs;
 	return (val + c->high_bits) & ~rhs;
 }
 }
 
 
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+	return ~1ul << __fls(mask);
+}
+
 #else
 #else
 
 
 #ifdef CONFIG_64BIT
 #ifdef CONFIG_64BIT

+ 11 - 12
arch/powerpc/mm/hash_native_64.c

@@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
  * be when they isi), and we are the only one left.  We rely on our kernel
  * be when they isi), and we are the only one left.  We rely on our kernel
  * mapping being 0xC0's and the hardware ignoring those two real bits.
  * mapping being 0xC0's and the hardware ignoring those two real bits.
  *
  *
+ * This must be called with interrupts disabled.
+ *
+ * Taking the native_tlbie_lock is unsafe here due to the possibility of
+ * lockdep being on. On pre POWER5 hardware, not taking the lock could
+ * cause deadlock. POWER5 and newer not taking the lock is fine. This only
+ * gets called during boot before secondary CPUs have come up and during
+ * crashdump and all bets are off anyway.
+ *
  * TODO: add batching support when enabled.  remember, no dynamic memory here,
  * TODO: add batching support when enabled.  remember, no dynamic memory here,
  * athough there is the control page available...
  * athough there is the control page available...
  */
  */
 static void native_hpte_clear(void)
 static void native_hpte_clear(void)
 {
 {
 	unsigned long vpn = 0;
 	unsigned long vpn = 0;
-	unsigned long slot, slots, flags;
+	unsigned long slot, slots;
 	struct hash_pte *hptep = htab_address;
 	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v;
 	unsigned long hpte_v;
 	unsigned long pteg_count;
 	unsigned long pteg_count;
@@ -596,13 +604,6 @@ static void native_hpte_clear(void)
 
 
 	pteg_count = htab_hash_mask + 1;
 	pteg_count = htab_hash_mask + 1;
 
 
-	local_irq_save(flags);
-
-	/* we take the tlbie lock and hold it.  Some hardware will
-	 * deadlock if we try to tlbie from two processors at once.
-	 */
-	raw_spin_lock(&native_tlbie_lock);
-
 	slots = pteg_count * HPTES_PER_GROUP;
 	slots = pteg_count * HPTES_PER_GROUP;
 
 
 	for (slot = 0; slot < slots; slot++, hptep++) {
 	for (slot = 0; slot < slots; slot++, hptep++) {
@@ -614,8 +615,8 @@ static void native_hpte_clear(void)
 		hpte_v = be64_to_cpu(hptep->v);
 		hpte_v = be64_to_cpu(hptep->v);
 
 
 		/*
 		/*
-		 * Call __tlbie() here rather than tlbie() since we
-		 * already hold the native_tlbie_lock.
+		 * Call __tlbie() here rather than tlbie() since we can't take the
+		 * native_tlbie_lock.
 		 */
 		 */
 		if (hpte_v & HPTE_V_VALID) {
 		if (hpte_v & HPTE_V_VALID) {
 			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
 			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@@ -625,8 +626,6 @@ static void native_hpte_clear(void)
 	}
 	}
 
 
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
-	raw_spin_unlock(&native_tlbie_lock);
-	local_irq_restore(flags);
 }
 }
 
 
 /*
 /*

+ 5 - 2
arch/powerpc/platforms/powernv/opal.c

@@ -487,9 +487,12 @@ int opal_machine_check(struct pt_regs *regs)
 	 *    PRD component would have already got notified about this
 	 *    PRD component would have already got notified about this
 	 *    error through other channels.
 	 *    error through other channels.
 	 *
 	 *
-	 * In any case, let us just fall through. We anyway heading
-	 * down to panic path.
+	 * If hardware marked this as an unrecoverable MCE, we are
+	 * going to panic anyway. Even if it didn't, it's not safe to
+	 * continue at this point, so we should explicitly panic.
 	 */
 	 */
+
+	panic("PowerNV Unrecovered Machine Check");
 	return 0;
 	return 0;
 }
 }
 
 

+ 0 - 5
arch/powerpc/platforms/ps3/os-area.c

@@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = {
 	.key = OS_AREA_DB_KEY_RTC_DIFF
 	.key = OS_AREA_DB_KEY_RTC_DIFF
 };
 };
 
 
-static const struct os_area_db_id os_area_db_id_video_mode = {
-	.owner = OS_AREA_DB_OWNER_LINUX,
-	.key = OS_AREA_DB_KEY_VIDEO_MODE
-};
-
 #define SECONDS_FROM_1970_TO_2000 946684800LL
 #define SECONDS_FROM_1970_TO_2000 946684800LL
 
 
 /**
 /**

+ 1 - 1
arch/s390/boot/compressed/Makefile

@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
 
 
 KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 
 

+ 1 - 1
arch/s390/configs/default_defconfig

@@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
 CONFIG_SCSI_DH_EMC=m

+ 1 - 1
arch/s390/configs/gcov_defconfig

@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
 CONFIG_SCSI_DH_EMC=m

+ 1 - 1
arch/s390/configs/performance_defconfig

@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
 CONFIG_SCSI_DH_EMC=m

+ 1 - 1
arch/s390/include/asm/numa.h

@@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
 int __node_distance(int a, int b);
 int __node_distance(int a, int b);
 void numa_update_cpu_topology(void);
 void numa_update_cpu_topology(void);
 
 
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 extern int numa_debug_enabled;
 extern int numa_debug_enabled;
 
 
 #else
 #else

+ 1 - 1
arch/s390/include/asm/topology.h

@@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
 #define cpumask_of_node cpumask_of_node
 #define cpumask_of_node cpumask_of_node
 static inline const struct cpumask *cpumask_of_node(int node)
 static inline const struct cpumask *cpumask_of_node(int node)
 {
 {
-	return node_to_cpumask_map[node];
+	return &node_to_cpumask_map[node];
 }
 }
 
 
 /*
 /*

+ 1 - 0
arch/s390/kernel/asm-offsets.c

@@ -176,6 +176,7 @@ int main(void)
 	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
 	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
 	DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
 	DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
 	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
 	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
+	DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
 	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));

+ 29 - 1
arch/s390/kernel/entry.S

@@ -733,6 +733,14 @@ ENTRY(psw_idle)
 	stg	%r3,__SF_EMPTY(%r15)
 	stg	%r3,__SF_EMPTY(%r15)
 	larl	%r1,.Lpsw_idle_lpsw+4
 	larl	%r1,.Lpsw_idle_lpsw+4
 	stg	%r1,__SF_EMPTY+8(%r15)
 	stg	%r1,__SF_EMPTY+8(%r15)
+#ifdef CONFIG_SMP
+	larl	%r1,smp_cpu_mtid
+	llgf	%r1,0(%r1)
+	ltgr	%r1,%r1
+	jz	.Lpsw_idle_stcctm
+	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
+.Lpsw_idle_stcctm:
+#endif
 	STCK	__CLOCK_IDLE_ENTER(%r2)
 	STCK	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
 .Lpsw_idle_lpsw:
@@ -1159,7 +1167,27 @@ cleanup_critical:
 	jhe	1f
 	jhe	1f
 	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
 	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
 	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
 	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1:	# account system time going idle
+1:	# calculate idle cycles
+#ifdef CONFIG_SMP
+	clg	%r9,BASED(.Lcleanup_idle_insn)
+	jl	3f
+	larl	%r1,smp_cpu_mtid
+	llgf	%r1,0(%r1)
+	ltgr	%r1,%r1
+	jz	3f
+	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
+	larl	%r3,mt_cycles
+	ag	%r3,__LC_PERCPU_OFFSET
+	la	%r4,__SF_EMPTY+16(%r15)
+2:	lg	%r0,0(%r3)
+	slg	%r0,0(%r4)
+	alg	%r0,64(%r4)
+	stg	%r0,0(%r3)
+	la	%r3,8(%r3)
+	la	%r4,8(%r4)
+	brct	%r1,2b
+#endif
+3:	# account system time going idle
 	lg	%r9,__LC_STEAL_TIMER
 	lg	%r9,__LC_STEAL_TIMER
 	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
 	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
 	slg	%r9,__LC_LAST_UPDATE_CLOCK
 	slg	%r9,__LC_LAST_UPDATE_CLOCK

+ 37 - 29
arch/s390/kernel/vtime.c

@@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock);
 static atomic64_t virt_timer_current;
 static atomic64_t virt_timer_current;
 static atomic64_t virt_timer_elapsed;
 static atomic64_t virt_timer_elapsed;
 
 
-static DEFINE_PER_CPU(u64, mt_cycles[32]);
+DEFINE_PER_CPU(u64, mt_cycles[8]);
 static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
 static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
@@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed)
 	return elapsed >= atomic64_read(&virt_timer_current);
 	return elapsed >= atomic64_read(&virt_timer_current);
 }
 }
 
 
+static void update_mt_scaling(void)
+{
+	u64 cycles_new[8], *cycles_old;
+	u64 delta, fac, mult, div;
+	int i;
+
+	stcctm5(smp_cpu_mtid + 1, cycles_new);
+	cycles_old = this_cpu_ptr(mt_cycles);
+	fac = 1;
+	mult = div = 0;
+	for (i = 0; i <= smp_cpu_mtid; i++) {
+		delta = cycles_new[i] - cycles_old[i];
+		div += delta;
+		mult *= i + 1;
+		mult += delta * fac;
+		fac *= i + 1;
+	}
+	div *= fac;
+	if (div > 0) {
+		/* Update scaling factor */
+		__this_cpu_write(mt_scaling_mult, mult);
+		__this_cpu_write(mt_scaling_div, div);
+		memcpy(cycles_old, cycles_new,
+		       sizeof(u64) * (smp_cpu_mtid + 1));
+	}
+	__this_cpu_write(mt_scaling_jiffies, jiffies_64);
+}
+
 /*
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  * to the lowcore fields user_timer, system_timer & steal_clock.
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 	struct thread_info *ti = task_thread_info(tsk);
 	struct thread_info *ti = task_thread_info(tsk);
 	u64 timer, clock, user, system, steal;
 	u64 timer, clock, user, system, steal;
 	u64 user_scaled, system_scaled;
 	u64 user_scaled, system_scaled;
-	int i;
 
 
 	timer = S390_lowcore.last_update_timer;
 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
 	clock = S390_lowcore.last_update_clock;
@@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
 
-	/* Do MT utilization calculation */
+	/* Update MT utilization calculation */
 	if (smp_cpu_mtid &&
 	if (smp_cpu_mtid &&
-	    time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
-		u64 cycles_new[32], *cycles_old;
-		u64 delta, fac, mult, div;
-
-		cycles_old = this_cpu_ptr(mt_cycles);
-		if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
-			fac = 1;
-			mult = div = 0;
-			for (i = 0; i <= smp_cpu_mtid; i++) {
-				delta = cycles_new[i] - cycles_old[i];
-				div += delta;
-				mult *= i + 1;
-				mult += delta * fac;
-				fac *= i + 1;
-			}
-			div *= fac;
-			if (div > 0) {
-				/* Update scaling factor */
-				__this_cpu_write(mt_scaling_mult, mult);
-				__this_cpu_write(mt_scaling_div, div);
-				memcpy(cycles_old, cycles_new,
-				       sizeof(u64) * (smp_cpu_mtid + 1));
-			}
-		}
-		__this_cpu_write(mt_scaling_jiffies, jiffies_64);
-	}
+	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+		update_mt_scaling();
 
 
 	user = S390_lowcore.user_timer - ti->user_timer;
 	user = S390_lowcore.user_timer - ti->user_timer;
 	S390_lowcore.steal_timer -= user;
 	S390_lowcore.steal_timer -= user;
@@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk)
 	S390_lowcore.last_update_timer = get_vtimer();
 	S390_lowcore.last_update_timer = get_vtimer();
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 
 
+	/* Update MT utilization calculation */
+	if (smp_cpu_mtid &&
+	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+		update_mt_scaling();
+
 	system = S390_lowcore.system_timer - ti->system_timer;
 	system = S390_lowcore.system_timer - ti->system_timer;
 	S390_lowcore.steal_timer -= system;
 	S390_lowcore.steal_timer -= system;
 	ti->system_timer = S390_lowcore.system_timer;
 	ti->system_timer = S390_lowcore.system_timer;

+ 2 - 2
arch/s390/numa/mode_emu.c

@@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
 		cpumask_copy(&top->thread_mask, &core->mask);
 		cpumask_copy(&top->thread_mask, &core->mask);
 		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
 		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
 		cpumask_copy(&top->book_mask, &core_book(core)->mask);
 		cpumask_copy(&top->book_mask, &core_book(core)->mask);
-		cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
+		cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
 		top->node_id = core_node(core)->id;
 		top->node_id = core_node(core)->id;
 	}
 	}
 }
 }
@@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
 
 
 	/* Clear all node masks */
 	/* Clear all node masks */
 	for (i = 0; i < MAX_NUMNODES; i++)
 	for (i = 0; i < MAX_NUMNODES; i++)
-		cpumask_clear(node_to_cpumask_map[i]);
+		cpumask_clear(&node_to_cpumask_map[i]);
 
 
 	/* Rebuild all masks */
 	/* Rebuild all masks */
 	toptree_for_each(core, numa, CORE)
 	toptree_for_each(core, numa, CORE)

+ 2 - 2
arch/s390/numa/numa.c

@@ -23,7 +23,7 @@
 pg_data_t *node_data[MAX_NUMNODES];
 pg_data_t *node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
 EXPORT_SYMBOL(node_data);
 
 
-cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 EXPORT_SYMBOL(node_to_cpumask_map);
 EXPORT_SYMBOL(node_to_cpumask_map);
 
 
 const struct numa_mode numa_mode_plain = {
 const struct numa_mode numa_mode_plain = {
@@ -144,7 +144,7 @@ void __init numa_setup(void)
 static int __init numa_init_early(void)
 static int __init numa_init_early(void)
 {
 {
 	/* Attach all possible CPUs to node 0 for now. */
 	/* Attach all possible CPUs to node 0 for now. */
-	cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
+	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
 	return 0;
 	return 0;
 }
 }
 early_initcall(numa_init_early);
 early_initcall(numa_init_early);

+ 1 - 0
arch/sh/include/asm/page.h

@@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
 
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
 extern void copy_page(void *to, void *from);
+#define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)
 
 
 struct page;
 struct page;
 struct vm_area_struct;
 struct vm_area_struct;

+ 2 - 0
arch/sparc/crypto/aes_glue.c

@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
 		.blkcipher = {
 		.blkcipher = {
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
 			.setkey		= aes_set_key,
 			.setkey		= aes_set_key,
 			.encrypt	= cbc_encrypt,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
 			.decrypt	= cbc_decrypt,
@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
 		.blkcipher = {
 		.blkcipher = {
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
 			.setkey		= aes_set_key,
 			.setkey		= aes_set_key,
 			.encrypt	= ctr_crypt,
 			.encrypt	= ctr_crypt,
 			.decrypt	= ctr_crypt,
 			.decrypt	= ctr_crypt,

+ 1 - 0
arch/sparc/crypto/camellia_glue.c

@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
 		.blkcipher = {
 		.blkcipher = {
 			.min_keysize	= CAMELLIA_MIN_KEY_SIZE,
 			.min_keysize	= CAMELLIA_MIN_KEY_SIZE,
 			.max_keysize	= CAMELLIA_MAX_KEY_SIZE,
 			.max_keysize	= CAMELLIA_MAX_KEY_SIZE,
+			.ivsize		= CAMELLIA_BLOCK_SIZE,
 			.setkey		= camellia_set_key,
 			.setkey		= camellia_set_key,
 			.encrypt	= cbc_encrypt,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
 			.decrypt	= cbc_decrypt,

+ 2 - 0
arch/sparc/crypto/des_glue.c

@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
 		.blkcipher = {
 		.blkcipher = {
 			.min_keysize	= DES_KEY_SIZE,
 			.min_keysize	= DES_KEY_SIZE,
 			.max_keysize	= DES_KEY_SIZE,
 			.max_keysize	= DES_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
 			.setkey		= des_set_key,
 			.setkey		= des_set_key,
 			.encrypt	= cbc_encrypt,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
 			.decrypt	= cbc_decrypt,
@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
 		.blkcipher = {
 		.blkcipher = {
 			.min_keysize	= DES3_EDE_KEY_SIZE,
 			.min_keysize	= DES3_EDE_KEY_SIZE,
 			.max_keysize	= DES3_EDE_KEY_SIZE,
 			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES3_EDE_BLOCK_SIZE,
 			.setkey		= des3_ede_set_key,
 			.setkey		= des3_ede_set_key,
 			.encrypt	= cbc3_encrypt,
 			.encrypt	= cbc3_encrypt,
 			.decrypt	= cbc3_decrypt,
 			.decrypt	= cbc3_decrypt,

+ 0 - 1
arch/tile/include/asm/Kbuild

@@ -40,5 +40,4 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += termios.h
 generic-y += trace_clock.h
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += types.h
-generic-y += word-at-a-time.h
 generic-y += xor.h
 generic-y += xor.h

+ 7 - 1
arch/tile/include/asm/word-at-a-time.h

@@ -6,7 +6,7 @@
 struct word_at_a_time { /* unused */ };
 struct word_at_a_time { /* unused */ };
 #define WORD_AT_A_TIME_CONSTANTS {}
 #define WORD_AT_A_TIME_CONSTANTS {}
 
 
-/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
+/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
 static inline unsigned long has_zero(unsigned long val, unsigned long *data,
 static inline unsigned long has_zero(unsigned long val, unsigned long *data,
 				     const struct word_at_a_time *c)
 				     const struct word_at_a_time *c)
 {
 {
@@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask)
 #endif
 #endif
 }
 }
 
 
+#ifdef __BIG_ENDIAN
+#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
+#else
+#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
 #endif /* _ASM_WORD_AT_A_TIME_H */

+ 1 - 0
arch/x86/Kconfig

@@ -1308,6 +1308,7 @@ config HIGHMEM
 config X86_PAE
 config X86_PAE
 	bool "PAE (Physical Address Extension) Support"
 	bool "PAE (Physical Address Extension) Support"
 	depends on X86_32 && !HIGHMEM4G
 	depends on X86_32 && !HIGHMEM4G
+	select SWIOTLB
 	---help---
 	---help---
 	  PAE is required for NX support, and furthermore enables
 	  PAE is required for NX support, and furthermore enables
 	  larger swapspace support for non-overcommit purposes. It
 	  larger swapspace support for non-overcommit purposes. It

+ 5 - 0
arch/x86/crypto/camellia_aesni_avx_glue.c

@@ -554,6 +554,11 @@ static int __init camellia_aesni_init(void)
 {
 {
 	const char *feature_name;
 	const char *feature_name;
 
 
+	if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+		pr_info("AVX or AES-NI instructions are not detected.\n");
+		return -ENODEV;
+	}
+
 	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
 	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
 		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;
 		return -ENODEV;

+ 2 - 4
arch/x86/include/asm/kvm_host.h

@@ -1226,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 
 int kvm_is_in_guest(void);
 int kvm_is_in_guest(void);
 
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 
 

+ 2 - 2
arch/x86/include/asm/xen/hypercall.h

@@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc)
 	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 }
 }
 
 
-static inline int
+static inline long
 HYPERVISOR_memory_op(unsigned int cmd, void *arg)
 HYPERVISOR_memory_op(unsigned int cmd, void *arg)
 {
 {
-	return _hypercall2(int, memory_op, cmd, arg);
+	return _hypercall2(long, memory_op, cmd, arg);
 }
 }
 
 
 static inline int
 static inline int

+ 7 - 3
arch/x86/kvm/emulate.c

@@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	u64 val, cr0, cr4;
 	u64 val, cr0, cr4;
 	u32 base3;
 	u32 base3;
 	u16 selector;
 	u16 selector;
-	int i;
+	int i, r;
 
 
 	for (i = 0; i < 16; i++)
 	for (i = 0; i < 16; i++)
 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
 
+	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+	if (r != X86EMUL_CONTINUE)
+		return r;
+
 	for (i = 0; i < 6; i++) {
 	for (i = 0; i < 6; i++) {
-		int r = rsm_load_seg_64(ctxt, smbase, i);
+		r = rsm_load_seg_64(ctxt, smbase, i);
 		if (r != X86EMUL_CONTINUE)
 		if (r != X86EMUL_CONTINUE)
 			return r;
 			return r;
 	}
 	}
 
 
-	return rsm_enter_protected_mode(ctxt, cr0, cr4);
+	return X86EMUL_CONTINUE;
 }
 }
 
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 static int em_rsm(struct x86_emulate_ctxt *ctxt)

+ 6 - 20
arch/x86/kvm/vmx.c

@@ -4105,17 +4105,13 @@ static void seg_setup(int seg)
 static int alloc_apic_access_page(struct kvm *kvm)
 static int alloc_apic_access_page(struct kvm *kvm)
 {
 {
 	struct page *page;
 	struct page *page;
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 	int r = 0;
 
 
 	mutex_lock(&kvm->slots_lock);
 	mutex_lock(&kvm->slots_lock);
 	if (kvm->arch.apic_access_page_done)
 	if (kvm->arch.apic_access_page_done)
 		goto out;
 		goto out;
-	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+				    APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
 	if (r)
 	if (r)
 		goto out;
 		goto out;
 
 
@@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 {
 {
 	/* Called with kvm->slots_lock held. */
 	/* Called with kvm->slots_lock held. */
 
 
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 	int r = 0;
 
 
 	BUG_ON(kvm->arch.ept_identity_pagetable_done);
 	BUG_ON(kvm->arch.ept_identity_pagetable_done);
 
 
-	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr =
-		kvm->arch.ept_identity_map_addr;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+				    kvm->arch.ept_identity_map_addr, PAGE_SIZE);
 
 
 	return r;
 	return r;
 }
 }
@@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 {
 	int ret;
 	int ret;
-	struct kvm_userspace_memory_region tss_mem = {
-		.slot = TSS_PRIVATE_MEMSLOT,
-		.guest_phys_addr = addr,
-		.memory_size = PAGE_SIZE * 3,
-		.flags = 0,
-	};
 
 
-	ret = x86_set_memory_region(kvm, &tss_mem);
+	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+				    PAGE_SIZE * 3);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 	kvm->arch.tss_addr = addr;
 	kvm->arch.tss_addr = addr;

+ 75 - 60
arch/x86/kvm/x86.c

@@ -6453,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	return 1;
 	return 1;
 }
 }
 
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 {
 	int r;
 	int r;
@@ -6461,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
 
 	for (;;) {
 	for (;;) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		    !vcpu->arch.apf.halted)
+		if (kvm_vcpu_running(vcpu))
 			r = vcpu_enter_guest(vcpu);
 			r = vcpu_enter_guest(vcpu);
 		else
 		else
 			r = vcpu_block(kvm, vcpu);
 			r = vcpu_block(kvm, vcpu);
@@ -7474,34 +7479,66 @@ void kvm_arch_sync_events(struct kvm *kvm)
 	kvm_free_pit(kvm);
 	kvm_free_pit(kvm);
 }
 }
 
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 {
 	int i, r;
 	int i, r;
+	unsigned long hva;
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *slot, old;
 
 
 	/* Called with kvm->slots_lock held.  */
 	/* Called with kvm->slots_lock held.  */
-	BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+		return -EINVAL;
+
+	slot = id_to_memslot(slots, id);
+	if (size) {
+		if (WARN_ON(slot->npages))
+			return -EEXIST;
+
+		/*
+		 * MAP_SHARED to prevent internal slot pages from being moved
+		 * by fork()/COW.
+		 */
+		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+			      MAP_SHARED | MAP_ANONYMOUS, 0);
+		if (IS_ERR((void *)hva))
+			return PTR_ERR((void *)hva);
+	} else {
+		if (!slot->npages)
+			return 0;
 
 
+		hva = 0;
+	}
+
+	old = *slot;
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		struct kvm_userspace_memory_region m = *mem;
+		struct kvm_userspace_memory_region m;
 
 
-		m.slot |= i << 16;
+		m.slot = id | (i << 16);
+		m.flags = 0;
+		m.guest_phys_addr = gpa;
+		m.userspace_addr = hva;
+		m.memory_size = size;
 		r = __kvm_set_memory_region(kvm, &m);
 		r = __kvm_set_memory_region(kvm, &m);
 		if (r < 0)
 		if (r < 0)
 			return r;
 			return r;
 	}
 	}
 
 
+	if (!size) {
+		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+		WARN_ON(r < 0);
+	}
+
 	return 0;
 	return 0;
 }
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
 
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 {
 	int r;
 	int r;
 
 
 	mutex_lock(&kvm->slots_lock);
 	mutex_lock(&kvm->slots_lock);
-	r = __x86_set_memory_region(kvm, mem);
+	r = __x86_set_memory_region(kvm, id, gpa, size);
 	mutex_unlock(&kvm->slots_lock);
 	mutex_unlock(&kvm->slots_lock);
 
 
 	return r;
 	return r;
@@ -7516,16 +7553,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		 * unless the the memory map has changed due to process exit
 		 * unless the the memory map has changed due to process exit
 		 * or fd copying.
 		 * or fd copying.
 		 */
 		 */
-		struct kvm_userspace_memory_region mem;
-		memset(&mem, 0, sizeof(mem));
-		mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = TSS_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
+		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
 	}
 	}
 	kvm_iommu_unmap_guest(kvm);
 	kvm_iommu_unmap_guest(kvm);
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vpic);
@@ -7628,27 +7658,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				const struct kvm_userspace_memory_region *mem,
 				const struct kvm_userspace_memory_region *mem,
 				enum kvm_mr_change change)
 				enum kvm_mr_change change)
 {
 {
-	/*
-	 * Only private memory slots need to be mapped here since
-	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
-	 */
-	if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
-		unsigned long userspace_addr;
-
-		/*
-		 * MAP_SHARED to prevent internal slot pages from being moved
-		 * by fork()/COW.
-		 */
-		userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
-					 PROT_READ | PROT_WRITE,
-					 MAP_SHARED | MAP_ANONYMOUS, 0);
-
-		if (IS_ERR((void *)userspace_addr))
-			return PTR_ERR((void *)userspace_addr);
-
-		memslot->userspace_addr = userspace_addr;
-	}
-
 	return 0;
 	return 0;
 }
 }
 
 
@@ -7710,17 +7719,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
 {
 	int nr_mmu_pages = 0;
 	int nr_mmu_pages = 0;
 
 
-	if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
-		int ret;
-
-		ret = vm_munmap(old->userspace_addr,
-				old->npages * PAGE_SIZE);
-		if (ret < 0)
-			printk(KERN_WARNING
-			       "kvm_vm_ioctl_set_memory_region: "
-			       "failed to munmap memory\n");
-	}
-
 	if (!kvm->arch.n_requested_mmu_pages)
 	if (!kvm->arch.n_requested_mmu_pages)
 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
 
@@ -7769,19 +7767,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	kvm_mmu_invalidate_zap_all_pages(kvm);
 	kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 }
 
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+
+	if (kvm_apic_has_events(vcpu))
+		return true;
+
+	if (vcpu->arch.pv.pv_unhalted)
+		return true;
+
+	if (atomic_read(&vcpu->arch.nmi_queued))
+		return true;
+
+	if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+		return true;
+
+	if (kvm_arch_interrupt_allowed(vcpu) &&
+	    kvm_cpu_has_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 {
 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
 		kvm_x86_ops->check_nested_events(vcpu, false);
 		kvm_x86_ops->check_nested_events(vcpu, false);
 
 
-	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		!vcpu->arch.apf.halted)
-		|| !list_empty_careful(&vcpu->async_pf.done)
-		|| kvm_apic_has_events(vcpu)
-		|| vcpu->arch.pv.pv_unhalted
-		|| atomic_read(&vcpu->arch.nmi_queued) ||
-		(kvm_arch_interrupt_allowed(vcpu) &&
-		 kvm_cpu_has_interrupt(vcpu));
+	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 }
 
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)

+ 24 - 0
arch/x86/xen/enlighten.c

@@ -33,6 +33,10 @@
 #include <linux/memblock.h>
 #include <linux/memblock.h>
 #include <linux/edd.h>
 #include <linux/edd.h>
 
 
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
 #include <xen/xen.h>
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/xen.h>
@@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 		/* Fast syscall setup is all done in hypercalls, so
 		/* Fast syscall setup is all done in hypercalls, so
 		   these are all ignored.  Stub them out here to stop
 		   these are all ignored.  Stub them out here to stop
 		   Xen console noise. */
 		   Xen console noise. */
+		break;
 
 
 	default:
 	default:
 		if (!pmu_msr_write(msr, low, high, &ret))
 		if (!pmu_msr_write(msr, low, high, &ret))
@@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
 	.notifier_call	= xen_hvm_cpu_notify,
 	.notifier_call	= xen_hvm_cpu_notify,
 };
 };
 
 
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+	native_machine_shutdown();
+	if (kexec_in_progress)
+		xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+	native_machine_crash_shutdown(regs);
+	xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
 static void __init xen_hvm_guest_init(void)
 static void __init xen_hvm_guest_init(void)
 {
 {
 	if (xen_pv_domain())
 	if (xen_pv_domain())
@@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void)
 	x86_init.irqs.intr_init = xen_init_IRQ;
 	x86_init.irqs.intr_init = xen_init_IRQ;
 	xen_hvm_init_time_ops();
 	xen_hvm_init_time_ops();
 	xen_hvm_init_mmu_ops();
 	xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+	machine_ops.shutdown = xen_hvm_shutdown;
+	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
 }
 }
 #endif
 #endif
 
 

+ 18 - 1
arch/x86/xen/p2m.c

@@ -112,6 +112,15 @@ static unsigned long *p2m_identity;
 static pte_t *p2m_missing_pte;
 static pte_t *p2m_missing_pte;
 static pte_t *p2m_identity_pte;
 static pte_t *p2m_identity_pte;
 
 
+/*
+ * Hint at last populated PFN.
+ *
+ * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
+ * can avoid scanning the whole P2M (which may be sized to account for
+ * hotplugged memory).
+ */
+static unsigned long xen_p2m_last_pfn;
+
 static inline unsigned p2m_top_index(unsigned long pfn)
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
 {
 	BUG_ON(pfn >= MAX_P2M_PFN);
 	BUG_ON(pfn >= MAX_P2M_PFN);
@@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void)
 	else
 	else
 		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
 		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
 			virt_to_mfn(p2m_top_mfn);
 			virt_to_mfn(p2m_top_mfn);
-	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+	HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
 	HYPERVISOR_shared_info->arch.p2m_generation = 0;
 	HYPERVISOR_shared_info->arch.p2m_generation = 0;
 	HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
 	HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
 	HYPERVISOR_shared_info->arch.p2m_cr3 =
 	HYPERVISOR_shared_info->arch.p2m_cr3 =
@@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void)
 	static struct vm_struct vm;
 	static struct vm_struct vm;
 	unsigned long p2m_limit;
 	unsigned long p2m_limit;
 
 
+	xen_p2m_last_pfn = xen_max_p2m_pfn;
+
 	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
 	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
 	vm.flags = VM_ALLOC;
 	vm.flags = VM_ALLOC;
 	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
 	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
@@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn)
 			free_p2m_page(p2m);
 			free_p2m_page(p2m);
 	}
 	}
 
 
+	/* Expanded the p2m? */
+	if (pfn > xen_p2m_last_pfn) {
+		xen_p2m_last_pfn = pfn;
+		HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+	}
+
 	return true;
 	return true;
 }
 }
 
 

+ 2 - 2
arch/x86/xen/setup.c

@@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void)
 {
 {
 	unsigned long max_pages, limit;
 	unsigned long max_pages, limit;
 	domid_t domid = DOMID_SELF;
 	domid_t domid = DOMID_SELF;
-	int ret;
+	long ret;
 
 
 	limit = xen_get_pages_limit();
 	limit = xen_get_pages_limit();
 	max_pages = limit;
 	max_pages = limit;
@@ -798,7 +798,7 @@ char * __init xen_memory_setup(void)
 		xen_ignore_unusable();
 		xen_ignore_unusable();
 
 
 	/* Make sure the Xen-supplied memory map is well-ordered. */
 	/* Make sure the Xen-supplied memory map is well-ordered. */
-	sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
+	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
 			  &xen_e820_map_entries);
 			  &xen_e820_map_entries);
 
 
 	max_pages = xen_get_max_pages();
 	max_pages = xen_get_max_pages();

+ 2 - 1
crypto/ahash.c

@@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 	struct crypto_alg *base = &alg->halg.base;
 	struct crypto_alg *base = &alg->halg.base;
 
 
 	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
 	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
-	    alg->halg.statesize > PAGE_SIZE / 8)
+	    alg->halg.statesize > PAGE_SIZE / 8 ||
+	    alg->halg.statesize == 0)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	base->cra_type = &crypto_ahash_type;
 	base->cra_type = &crypto_ahash_type;

+ 1 - 0
drivers/acpi/acpica/acglobal.h

@@ -61,6 +61,7 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX);
 
 
 #if (!ACPI_REDUCED_HARDWARE)
 #if (!ACPI_REDUCED_HARDWARE)
 ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
 ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);

+ 1 - 3
drivers/acpi/acpica/actables.h

@@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded);
 /*
 /*
  * tbfadt - FADT parse/convert/validate
  * tbfadt - FADT parse/convert/validate
  */
  */
-void acpi_tb_parse_fadt(u32 table_index);
+void acpi_tb_parse_fadt(void);
 
 
 void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
 void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
 
 
@@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id);
  */
  */
 acpi_status acpi_tb_initialize_facs(void);
 acpi_status acpi_tb_initialize_facs(void);
 
 
-u8 acpi_tb_tables_loaded(void);
-
 void
 void
 acpi_tb_print_table_header(acpi_physical_address address,
 acpi_tb_print_table_header(acpi_physical_address address,
 			   struct acpi_table_header *header);
 			   struct acpi_table_header *header);

+ 1 - 1
drivers/acpi/acpica/evxfevnt.c

@@ -71,7 +71,7 @@ acpi_status acpi_enable(void)
 
 
 	/* ACPI tables must be present */
 	/* ACPI tables must be present */
 
 
-	if (!acpi_tb_tables_loaded()) {
+	if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) {
 		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
 		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
 	}
 	}
 
 

+ 5 - 5
drivers/acpi/acpica/tbfadt.c

@@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
  *
  *
  * FUNCTION:    acpi_tb_parse_fadt
  * FUNCTION:    acpi_tb_parse_fadt
  *
  *
- * PARAMETERS:  table_index         - Index for the FADT
+ * PARAMETERS:  None
  *
  *
  * RETURN:      None
  * RETURN:      None
  *
  *
@@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64)
  *
  *
  ******************************************************************************/
  ******************************************************************************/
 
 
-void acpi_tb_parse_fadt(u32 table_index)
+void acpi_tb_parse_fadt(void)
 {
 {
 	u32 length;
 	u32 length;
 	struct acpi_table_header *table;
 	struct acpi_table_header *table;
@@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(u32 table_index)
 	 * Get a local copy of the FADT and convert it to a common format
 	 * Get a local copy of the FADT and convert it to a common format
 	 * Map entire FADT, assumed to be smaller than one page.
 	 * Map entire FADT, assumed to be smaller than one page.
 	 */
 	 */
-	length = acpi_gbl_root_table_list.tables[table_index].length;
+	length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
 
 
 	table =
 	table =
-	    acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
-			       address, length);
+	    acpi_os_map_memory(acpi_gbl_root_table_list.
+			       tables[acpi_gbl_fadt_index].address, length);
 	if (!table) {
 	if (!table) {
 		return;
 		return;
 	}
 	}

+ 2 - 24
drivers/acpi/acpica/tbutils.c

@@ -97,29 +97,6 @@ acpi_status acpi_tb_initialize_facs(void)
 }
 }
 #endif				/* !ACPI_REDUCED_HARDWARE */
 #endif				/* !ACPI_REDUCED_HARDWARE */
 
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_tables_loaded
- *
- * PARAMETERS:  None
- *
- * RETURN:      TRUE if required ACPI tables are loaded
- *
- * DESCRIPTION: Determine if the minimum required ACPI tables are present
- *              (FADT, FACS, DSDT)
- *
- ******************************************************************************/
-
-u8 acpi_tb_tables_loaded(void)
-{
-
-	if (acpi_gbl_root_table_list.current_table_count >= 4) {
-		return (TRUE);
-	}
-
-	return (FALSE);
-}
-
 /*******************************************************************************
 /*******************************************************************************
  *
  *
  * FUNCTION:    acpi_tb_check_dsdt_header
  * FUNCTION:    acpi_tb_check_dsdt_header
@@ -392,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
 		    ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
 		    ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
 				      tables[table_index].signature,
 				      tables[table_index].signature,
 				      ACPI_SIG_FADT)) {
 				      ACPI_SIG_FADT)) {
-			acpi_tb_parse_fadt(table_index);
+			acpi_gbl_fadt_index = table_index;
+			acpi_tb_parse_fadt();
 		}
 		}
 
 
 next_table:
 next_table:

+ 6 - 16
drivers/base/power/domain_governor.c

@@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev)
 				      dev_update_qos_constraint);
 				      dev_update_qos_constraint);
 
 
 	if (constraint_ns > 0) {
 	if (constraint_ns > 0) {
-		constraint_ns -= td->start_latency_ns;
+		constraint_ns -= td->save_state_latency_ns +
+				td->stop_latency_ns +
+				td->start_latency_ns +
+				td->restore_state_latency_ns;
 		if (constraint_ns == 0)
 		if (constraint_ns == 0)
 			return false;
 			return false;
 	}
 	}
 	td->effective_constraint_ns = constraint_ns;
 	td->effective_constraint_ns = constraint_ns;
-	td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
-				constraint_ns == 0;
+	td->cached_stop_ok = constraint_ns >= 0;
+
 	/*
 	/*
 	 * The children have been suspended already, so we don't need to take
 	 * The children have been suspended already, so we don't need to take
 	 * their stop latencies into account here.
 	 * their stop latencies into account here.
@@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 
 
 	off_on_time_ns = genpd->power_off_latency_ns +
 	off_on_time_ns = genpd->power_off_latency_ns +
 				genpd->power_on_latency_ns;
 				genpd->power_on_latency_ns;
-	/*
-	 * It doesn't make sense to remove power from the domain if saving
-	 * the state of all devices in it and the power off/power on operations
-	 * take too much time.
-	 *
-	 * All devices in this domain have been stopped already at this point.
-	 */
-	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-		if (pdd->dev->driver)
-			off_on_time_ns +=
-				to_gpd_data(pdd)->td.save_state_latency_ns;
-	}
 
 
 	min_off_time_ns = -1;
 	min_off_time_ns = -1;
 	/*
 	/*
@@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 		 * constraint_ns cannot be negative here, because the device has
 		 * constraint_ns cannot be negative here, because the device has
 		 * been suspended.
 		 * been suspended.
 		 */
 		 */
-		constraint_ns -= td->restore_state_latency_ns;
 		if (constraint_ns <= off_on_time_ns)
 		if (constraint_ns <= off_on_time_ns)
 			return false;
 			return false;
 
 

+ 2 - 3
drivers/base/regmap/regmap-debugfs.c

@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
 /* Calculate the length of a fixed format  */
 /* Calculate the length of a fixed format  */
 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
 {
 {
-	snprintf(buf, buf_size, "%x", max_val);
-	return strlen(buf);
+	return snprintf(NULL, 0, "%x", max_val);
 }
 }
 
 
 static ssize_t regmap_name_read_file(struct file *file,
 static ssize_t regmap_name_read_file(struct file *file,
@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
 		/* If we're in the region the user is trying to read */
 		/* If we're in the region the user is trying to read */
 		if (p >= *ppos) {
 		if (p >= *ppos) {
 			/* ...but not beyond it */
 			/* ...but not beyond it */
-			if (buf_pos >= count - 1 - tot_len)
+			if (buf_pos + tot_len + 1 >= count)
 				break;
 				break;
 
 
 			/* Format the register */
 			/* Format the register */

+ 8 - 2
drivers/block/rbd.c

@@ -1863,9 +1863,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 		rbd_osd_read_callback(obj_request);
 		rbd_osd_read_callback(obj_request);
 		break;
 		break;
 	case CEPH_OSD_OP_SETALLOCHINT:
 	case CEPH_OSD_OP_SETALLOCHINT:
-		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
+		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
+			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
 		/* fall through */
 		/* fall through */
 	case CEPH_OSD_OP_WRITE:
 	case CEPH_OSD_OP_WRITE:
+	case CEPH_OSD_OP_WRITEFULL:
 		rbd_osd_write_callback(obj_request);
 		rbd_osd_write_callback(obj_request);
 		break;
 		break;
 	case CEPH_OSD_OP_STAT:
 	case CEPH_OSD_OP_STAT:
@@ -2401,7 +2403,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
 				opcode = CEPH_OSD_OP_ZERO;
 				opcode = CEPH_OSD_OP_ZERO;
 		}
 		}
 	} else if (op_type == OBJ_OP_WRITE) {
 	} else if (op_type == OBJ_OP_WRITE) {
-		opcode = CEPH_OSD_OP_WRITE;
+		if (!offset && length == object_size)
+			opcode = CEPH_OSD_OP_WRITEFULL;
+		else
+			opcode = CEPH_OSD_OP_WRITE;
 		osd_req_op_alloc_hint_init(osd_request, num_ops,
 		osd_req_op_alloc_hint_init(osd_request, num_ops,
 					object_size, object_size);
 					object_size, object_size);
 		num_ops++;
 		num_ops++;
@@ -3760,6 +3765,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	/* set io sizes to object size */
 	/* set io sizes to object size */
 	segment_size = rbd_obj_bytes(&rbd_dev->header);
 	segment_size = rbd_obj_bytes(&rbd_dev->header);
 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+	q->limits.max_sectors = queue_max_hw_sectors(q);
 	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
 	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
 	blk_queue_max_segment_size(q, segment_size);
 	blk_queue_max_segment_size(q, segment_size);
 	blk_queue_io_min(q, segment_size);
 	blk_queue_io_min(q, segment_size);

+ 0 - 1
drivers/bus/Kconfig

@@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL
 
 
 config ARM_CCI500_PMU
 config ARM_CCI500_PMU
 	bool "ARM CCI500 PMU support"
 	bool "ARM CCI500 PMU support"
-	default y
 	depends on (ARM && CPU_V7) || ARM64
 	depends on (ARM && CPU_V7) || ARM64
 	depends on PERF_EVENTS
 	depends on PERF_EVENTS
 	select ARM_CCI_PMU
 	select ARM_CCI_PMU

+ 3 - 1
drivers/clk/mvebu/clk-cpu.c

@@ -197,6 +197,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
 	for_each_node_by_type(dn, "cpu") {
 	for_each_node_by_type(dn, "cpu") {
 		struct clk_init_data init;
 		struct clk_init_data init;
 		struct clk *clk;
 		struct clk *clk;
+		struct clk *parent_clk;
 		char *clk_name = kzalloc(5, GFP_KERNEL);
 		char *clk_name = kzalloc(5, GFP_KERNEL);
 		int cpu, err;
 		int cpu, err;
 
 
@@ -208,8 +209,9 @@ static void __init of_cpu_clk_setup(struct device_node *node)
 			goto bail_out;
 			goto bail_out;
 
 
 		sprintf(clk_name, "cpu%d", cpu);
 		sprintf(clk_name, "cpu%d", cpu);
+		parent_clk = of_clk_get(node, 0);
 
 
-		cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
+		cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
 		cpuclk[cpu].clk_name = clk_name;
 		cpuclk[cpu].clk_name = clk_name;
 		cpuclk[cpu].cpu = cpu;
 		cpuclk[cpu].cpu = cpu;
 		cpuclk[cpu].reg_base = clock_complex_base;
 		cpuclk[cpu].reg_base = clock_complex_base;

+ 5 - 5
drivers/clk/samsung/clk-cpu.c

@@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
 	 * the values for DIV_COPY and DIV_HPM dividers need not be set.
 	 * the values for DIV_COPY and DIV_HPM dividers need not be set.
 	 */
 	 */
 	div0 = cfg_data->div0;
 	div0 = cfg_data->div0;
-	if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
 		div1 = cfg_data->div1;
 		div1 = cfg_data->div1;
 		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
 		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
 			div1 = readl(base + E4210_DIV_CPU1) &
 			div1 = readl(base + E4210_DIV_CPU1) &
@@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
 		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
 		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
 		WARN_ON(alt_div >= MAX_DIV);
 		WARN_ON(alt_div >= MAX_DIV);
 
 
-		if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+		if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
 			/*
 			/*
 			 * In Exynos4210, ATB clock parent is also mout_core. So
 			 * In Exynos4210, ATB clock parent is also mout_core. So
 			 * ATB clock also needs to be mantained at safe speed.
 			 * ATB clock also needs to be mantained at safe speed.
@@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
 	writel(div0, base + E4210_DIV_CPU0);
 	writel(div0, base + E4210_DIV_CPU0);
 	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
 	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
 
 
-	if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
 		writel(div1, base + E4210_DIV_CPU1);
 		writel(div1, base + E4210_DIV_CPU1);
 		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
 		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
 				DIV_MASK_ALL);
 				DIV_MASK_ALL);
@@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
 	unsigned long mux_reg;
 	unsigned long mux_reg;
 
 
 	/* find out the divider values to use for clock data */
 	/* find out the divider values to use for clock data */
-	if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
 		while ((cfg_data->prate * 1000) != ndata->new_rate) {
 		while ((cfg_data->prate * 1000) != ndata->new_rate) {
 			if (cfg_data->prate == 0)
 			if (cfg_data->prate == 0)
 				return -EINVAL;
 				return -EINVAL;
@@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
 	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
 	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
 	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
 	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
 
 
-	if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
 		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
 		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
 		div_mask |= E4210_DIV0_ATB_MASK;
 		div_mask |= E4210_DIV0_ATB_MASK;
 	}
 	}

+ 1 - 1
drivers/clk/ti/clk-3xxx.c

@@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
 	DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
 	DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
 	DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
 	DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
 	DT_CLK(NULL, "uart3_ick", "uart3_ick"),
 	DT_CLK(NULL, "uart3_ick", "uart3_ick"),
-	DT_CLK(NULL, "uart4_ick", "uart4_ick"),
 	DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
 	DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
 	DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
 	DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
 	DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
 	DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
@@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = {
 static struct ti_dt_clk omap36xx_clks[] = {
 static struct ti_dt_clk omap36xx_clks[] = {
 	DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
 	DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
 	DT_CLK(NULL, "uart4_fck", "uart4_fck"),
 	DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+	DT_CLK(NULL, "uart4_ick", "uart4_ick"),
 	{ .node_name = NULL },
 	{ .node_name = NULL },
 };
 };
 
 

+ 1 - 17
drivers/clk/ti/clk-7xx.c

@@ -18,7 +18,6 @@
 
 
 #include "clock.h"
 #include "clock.h"
 
 
-#define DRA7_DPLL_ABE_DEFFREQ				180633600
 #define DRA7_DPLL_GMAC_DEFFREQ				1000000000
 #define DRA7_DPLL_GMAC_DEFFREQ				1000000000
 #define DRA7_DPLL_USB_DEFFREQ				960000000
 #define DRA7_DPLL_USB_DEFFREQ				960000000
 
 
@@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = {
 int __init dra7xx_dt_clk_init(void)
 int __init dra7xx_dt_clk_init(void)
 {
 {
 	int rc;
 	int rc;
-	struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck;
+	struct clk *dpll_ck, *hdcp_ck;
 
 
 	ti_dt_clocks_register(dra7xx_clks);
 	ti_dt_clocks_register(dra7xx_clks);
 
 
 	omap2_clk_disable_autoidle_all();
 	omap2_clk_disable_autoidle_all();
 
 
-	abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
-	sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
-	dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
-
-	rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
-	if (!rc)
-		rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
-	if (rc)
-		pr_err("%s: failed to configure ABE DPLL!\n", __func__);
-
-	dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
-	rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
-	if (rc)
-		pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
-
 	dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
 	dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
 	rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
 	rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
 	if (rc)
 	if (rc)

+ 2 - 2
drivers/clk/ti/clkt_dflt.c

@@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw)
 		}
 		}
 	}
 	}
 
 
-	if (unlikely(!clk->enable_reg)) {
+	if (unlikely(IS_ERR(clk->enable_reg))) {
 		pr_err("%s: %s missing enable_reg\n", __func__,
 		pr_err("%s: %s missing enable_reg\n", __func__,
 		       clk_hw_get_name(hw));
 		       clk_hw_get_name(hw));
 		ret = -EINVAL;
 		ret = -EINVAL;
@@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw)
 	u32 v;
 	u32 v;
 
 
 	clk = to_clk_hw_omap(hw);
 	clk = to_clk_hw_omap(hw);
-	if (!clk->enable_reg) {
+	if (IS_ERR(clk->enable_reg)) {
 		/*
 		/*
 		 * 'independent' here refers to a clock which is not
 		 * 'independent' here refers to a clock which is not
 		 * controlled by its parent.
 		 * controlled by its parent.

+ 3 - 0
drivers/cpufreq/acpi-cpufreq.c

@@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 {
 {
 	struct acpi_cpufreq_data *data = policy->driver_data;
 	struct acpi_cpufreq_data *data = policy->driver_data;
 
 
+	if (unlikely(!data))
+		return -ENODEV;
+
 	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 }
 }
 
 

+ 3 - 1
drivers/cpufreq/cpufreq.c

@@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu)
 	 * since this is a core component, and is essential for the
 	 * since this is a core component, and is essential for the
 	 * subsequent light-weight ->init() to succeed.
 	 * subsequent light-weight ->init() to succeed.
 	 */
 	 */
-	if (cpufreq_driver->exit)
+	if (cpufreq_driver->exit) {
 		cpufreq_driver->exit(policy);
 		cpufreq_driver->exit(policy);
+		policy->freq_table = NULL;
+	}
 }
 }
 
 
 /**
 /**

+ 5 - 0
drivers/cpufreq/intel_pstate.c

@@ -776,6 +776,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 	local_irq_save(flags);
 	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
+	if (cpu->prev_mperf == mperf) {
+		local_irq_restore(flags);
+		return;
+	}
+
 	tsc = rdtsc();
 	tsc = rdtsc();
 	local_irq_restore(flags);
 	local_irq_restore(flags);
 
 

+ 4 - 3
drivers/devfreq/devfreq.c

@@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
 	if (err) {
 	if (err) {
 		put_device(&devfreq->dev);
 		put_device(&devfreq->dev);
 		mutex_unlock(&devfreq->lock);
 		mutex_unlock(&devfreq->lock);
-		goto err_dev;
+		goto err_out;
 	}
 	}
 
 
 	mutex_unlock(&devfreq->lock);
 	mutex_unlock(&devfreq->lock);
@@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
 err_init:
 err_init:
 	list_del(&devfreq->node);
 	list_del(&devfreq->node);
 	device_unregister(&devfreq->dev);
 	device_unregister(&devfreq->dev);
-err_dev:
 	kfree(devfreq);
 	kfree(devfreq);
 err_out:
 err_out:
 	return ERR_PTR(err);
 	return ERR_PTR(err);
@@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
 		ret = PTR_ERR(governor);
 		ret = PTR_ERR(governor);
 		goto out;
 		goto out;
 	}
 	}
-	if (df->governor == governor)
+	if (df->governor == governor) {
+		ret = 0;
 		goto out;
 		goto out;
+	}
 
 
 	if (df->governor) {
 	if (df->governor) {
 		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
 		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);

+ 6 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c

@@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
 		/* disp clock */
 		/* disp clock */
 		adev->clock.default_dispclk =
 		adev->clock.default_dispclk =
 			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
 			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-		if (adev->clock.default_dispclk == 0)
-			adev->clock.default_dispclk = 54000; /* 540 Mhz */
+		/* set a reasonable default for DP */
+		if (adev->clock.default_dispclk < 53900) {
+			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+				 adev->clock.default_dispclk / 100);
+			adev->clock.default_dispclk = 60000;
+		}
 		adev->clock.dp_extclk =
 		adev->clock.dp_extclk =
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
 		adev->clock.current_dispclk = adev->clock.default_dispclk;
 		adev->clock.current_dispclk = adev->clock.default_dispclk;

+ 3 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -177,7 +177,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 
 	/* get chunks */
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		ret = -EFAULT;
 		ret = -EFAULT;
@@ -197,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		struct drm_amdgpu_cs_chunk user_chunk;
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 		uint32_t __user *cdata;
 
 
-		chunk_ptr = (void __user *)chunk_array[i];
+		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
 			ret = -EFAULT;
 			ret = -EFAULT;
@@ -208,7 +208,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		p->chunks[i].length_dw = user_chunk.length_dw;
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 
 		size = p->chunks[i].length_dw;
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)user_chunk.chunk_data;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 		p->chunks[i].user_ptr = cdata;
 
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));

+ 0 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	/* We borrow the event spin lock for protecting flip_status */
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
 
-	/* set the proper interrupt */
-	amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
 	/* do the flip (mmio) */
 	/* do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	/* set the flip status */
 	/* set the flip status */

+ 5 - 5
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -242,11 +242,11 @@ static struct pci_device_id pciidlist[] = {
 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
 #endif
 	/* topaz */
 	/* topaz */
-	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
 	/* tonga */
 	/* tonga */
 	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},

+ 16 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c

@@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 		return true;
 		return true;
 	return false;
 	return false;
 }
 }
+
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
+{
+	struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+	struct drm_fb_helper *fb_helper;
+	int ret;
+
+	if (!afbdev)
+		return;
+
+	fb_helper = &afbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret)
+		DRM_DEBUG("failed to restore crtc mode\n");
+}

+ 4 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -485,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
  */
 /**
 /**
- * amdgpu_driver_firstopen_kms - drm callback for last close
+ * amdgpu_driver_lastclose_kms - drm callback for last close
  *
  *
  * @dev: drm dev pointer
  * @dev: drm dev pointer
  *
  *
@@ -493,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  */
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
 {
+	struct amdgpu_device *adev = dev->dev_private;
+
+	amdgpu_fbdev_restore_mode(adev);
 	vga_switcheroo_process_delayed_switch();
 	vga_switcheroo_process_delayed_switch();
 }
 }
 
 

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h

@@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
 
 
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
 
 

+ 3 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		return -ENOMEM;
 		return -ENOMEM;
 
 
 	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
 	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-	if (r)
+	if (r) {
+		kfree(ib);
 		return r;
 		return r;
+	}
 	ib->length_dw = 0;
 	ib->length_dw = 0;
 
 
 	/* walk over the address space and update the page directory */
 	/* walk over the address space and update the page directory */

+ 5 - 3
drivers/gpu/drm/amd/amdgpu/ci_dpm.c

@@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle)
 	if (!amdgpu_dpm)
 	if (!amdgpu_dpm)
 		return 0;
 		return 0;
 
 
+	/* init the sysfs and debugfs files late */
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		return ret;
+
 	ret = ci_set_temperature_range(adev);
 	ret = ci_set_temperature_range(adev);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
@@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle)
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	if (amdgpu_dpm == 1)
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
 		amdgpu_pm_print_power_states(adev);
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_failed;
 	mutex_unlock(&adev->pm.mutex);
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");
 	DRM_INFO("amdgpu: dpm initialized\n");
 
 

+ 3 - 0
drivers/gpu/drm/amd/amdgpu/cik.c

@@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
 	int ret, i;
 	int ret, i;
 	u16 tmp16;
 	u16 tmp16;
 
 
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
 	if (amdgpu_pcie_gen2 == 0)
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 		return;
 
 

+ 6 - 4
drivers/gpu/drm/amd/amdgpu/cz_dpm.c

@@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 
 	if (amdgpu_dpm) {
 	if (amdgpu_dpm) {
+		int ret;
+		/* init the sysfs and debugfs files late */
+		ret = amdgpu_pm_sysfs_init(adev);
+		if (ret)
+			return ret;
+
 		/* powerdown unused blocks for now */
 		/* powerdown unused blocks for now */
 		cz_dpm_powergate_uvd(adev, true);
 		cz_dpm_powergate_uvd(adev, true);
 		cz_dpm_powergate_vce(adev, true);
 		cz_dpm_powergate_vce(adev, true);
@@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle)
 	if (amdgpu_dpm == 1)
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
 		amdgpu_pm_print_power_states(adev);
 
 
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_init_failed;
-
 	mutex_unlock(&adev->pm.mutex);
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");
 	DRM_INFO("amdgpu: dpm initialized\n");
 
 

+ 28 - 2
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c

@@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 }
 
 
+static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
 /**
  * dce_v10_0_page_flip - pageflip callback.
  * dce_v10_0_page_flip - pageflip callback.
  *
  *
@@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		dce_v10_0_vga_enable(crtc, true);
 		dce_v10_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v10_0_vga_enable(crtc, false);
 		dce_v10_0_vga_enable(crtc, false);
-		/* Make sure VBLANK interrupt is still enabled */
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v10_0_crtc_load_lut(crtc);
 		dce_v10_0_crtc_load_lut(crtc);
 		break;
 		break;
@@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle)
 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 	}
 
 
+	dce_v10_0_pageflip_interrupt_init(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle)
 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 	}
 
 
+	dce_v10_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle)
 
 
 	dce_v10_0_hpd_fini(adev);
 	dce_v10_0_hpd_fini(adev);
 
 
+	dce_v10_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle)
 	/* initialize hpd */
 	/* initialize hpd */
 	dce_v10_0_hpd_init(adev);
 	dce_v10_0_hpd_init(adev);
 
 
+	dce_v10_0_pageflip_interrupt_init(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
 
 	return 0;
 	return 0;

+ 29 - 3
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 }
 
 
+static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
 /**
  * dce_v11_0_page_flip - pageflip callback.
  * dce_v11_0_page_flip - pageflip callback.
  *
  *
@@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		dce_v11_0_vga_enable(crtc, true);
 		dce_v11_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v11_0_vga_enable(crtc, false);
 		dce_v11_0_vga_enable(crtc, false);
-		/* Make sure VBLANK interrupt is still enabled */
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v11_0_crtc_load_lut(crtc);
 		dce_v11_0_crtc_load_lut(crtc);
 		break;
 		break;
@@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle)
 
 
 	switch (adev->asic_type) {
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
 	case CHIP_CARRIZO:
-		adev->mode_info.num_crtc = 4;
+		adev->mode_info.num_crtc = 3;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 9;
 		adev->mode_info.num_dig = 9;
 		break;
 		break;
@@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle)
 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 	}
 
 
+	dce_v11_0_pageflip_interrupt_init(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle)
 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 	}
 
 
+	dce_v11_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle)
 
 
 	dce_v11_0_hpd_fini(adev);
 	dce_v11_0_hpd_fini(adev);
 
 
+	dce_v11_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle)
 	/* initialize hpd */
 	/* initialize hpd */
 	dce_v11_0_hpd_init(adev);
 	dce_v11_0_hpd_init(adev);
 
 
+	dce_v11_0_pageflip_interrupt_init(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
 
 	return 0;
 	return 0;

+ 28 - 2
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 }
 
 
+static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
 /**
  * dce_v8_0_page_flip - pageflip callback.
  * dce_v8_0_page_flip - pageflip callback.
  *
  *
@@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		dce_v8_0_vga_enable(crtc, true);
 		dce_v8_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v8_0_vga_enable(crtc, false);
 		dce_v8_0_vga_enable(crtc, false);
-		/* Make sure VBLANK interrupt is still enabled */
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v8_0_crtc_load_lut(crtc);
 		dce_v8_0_crtc_load_lut(crtc);
 		break;
 		break;
@@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle)
 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 	}
 
 
+	dce_v8_0_pageflip_interrupt_init(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle)
 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 	}
 
 
+	dce_v8_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle)
 
 
 	dce_v8_0_hpd_fini(adev);
 	dce_v8_0_hpd_fini(adev);
 
 
+	dce_v8_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle)
 	/* initialize hpd */
 	/* initialize hpd */
 	dce_v8_0_hpd_init(adev);
 	dce_v8_0_hpd_init(adev);
 
 
+	dce_v8_0_pageflip_interrupt_init(adev);
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
 
 	return 0;
 	return 0;

+ 6 - 3
drivers/gpu/drm/amd/amdgpu/kv_dpm.c

@@ -2995,6 +2995,12 @@ static int kv_dpm_late_init(void *handle)
 {
 {
 	/* powerdown unused blocks for now */
 	/* powerdown unused blocks for now */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	/* init the sysfs and debugfs files late */
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		return ret;
 
 
 	kv_dpm_powergate_acp(adev, true);
 	kv_dpm_powergate_acp(adev, true);
 	kv_dpm_powergate_samu(adev, true);
 	kv_dpm_powergate_samu(adev, true);
@@ -3038,9 +3044,6 @@ static int kv_dpm_sw_init(void *handle)
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	if (amdgpu_dpm == 1)
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
 		amdgpu_pm_print_power_states(adev);
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_failed;
 	mutex_unlock(&adev->pm.mutex);
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");
 	DRM_INFO("amdgpu: dpm initialized\n");
 
 

+ 3 - 0
drivers/gpu/drm/amd/amdgpu/vi.c

@@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
 	u32 mask;
 	u32 mask;
 	int ret;
 	int ret;
 
 
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
 	if (amdgpu_pcie_gen2 == 0)
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 		return;
 
 

部分文件因文件數量過多而無法顯示