View Source

Merge branch 'ib-mfd-regulator-gpio-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd into devel

Linus Walleij 9 years ago
parent
commit
0bae2f1732
100 changed files with 736 additions and 499 deletions
  1. .mailmap (+1 -0)
  2. Documentation/Intel-IOMMU.txt (+1 -1)
  3. Documentation/devicetree/bindings/mfd/tps65912.txt (+50 -0)
  4. Documentation/virtual/kvm/api.txt (+1 -1)
  5. Makefile (+1 -1)
  6. arch/arm/boot/compressed/Makefile (+9 -0)
  7. arch/arm/include/uapi/asm/unistd.h (+1 -0)
  8. arch/arm/kernel/calls.S (+1 -0)
  9. arch/arm64/Makefile (+2 -0)
  10. arch/arm64/configs/defconfig (+25 -17)
  11. arch/arm64/include/asm/pgtable.h (+10 -11)
  12. arch/arm64/kernel/head.S (+5 -0)
  13. arch/arm64/kernel/image.h (+25 -15)
  14. arch/arm64/mm/dump.c (+1 -1)
  15. arch/arm64/mm/kasan_init.c (+9 -0)
  16. arch/arm64/mm/pageattr.c (+3 -0)
  17. arch/arm64/mm/proc-macros.S (+12 -0)
  18. arch/arm64/mm/proc.S (+2 -2)
  19. arch/mips/bcm63xx/nvram.c (+11 -0)
  20. arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h (+2 -0)
  21. arch/powerpc/include/asm/book3s/64/hash.h (+3 -1)
  22. arch/powerpc/include/asm/book3s/64/pgtable.h (+0 -1)
  23. arch/powerpc/include/asm/kvm_host.h (+1 -2)
  24. arch/powerpc/include/asm/systbl.h (+1 -0)
  25. arch/powerpc/include/asm/unistd.h (+1 -1)
  26. arch/powerpc/include/uapi/asm/unistd.h (+1 -0)
  27. arch/powerpc/kernel/eeh_pe.c (+15 -18)
  28. arch/powerpc/kernel/misc_64.S (+0 -28)
  29. arch/powerpc/kernel/module_64.c (+9 -3)
  30. arch/powerpc/kvm/book3s_64_mmu.c (+0 -3)
  31. arch/powerpc/kvm/book3s_hv.c (+18 -0)
  32. arch/powerpc/kvm/book3s_hv_rmhandlers.S (+3 -1)
  33. arch/powerpc/kvm/powerpc.c (+10 -10)
  34. arch/powerpc/mm/mem.c (+2 -2)
  35. arch/powerpc/perf/power8-pmu.c (+1 -1)
  36. arch/s390/include/asm/irqflags.h (+7 -2)
  37. arch/s390/include/asm/kvm_host.h (+0 -1)
  38. arch/s390/include/asm/pci_io.h (+9 -5)
  39. arch/s390/include/asm/processor.h (+2 -2)
  40. arch/s390/include/asm/ptrace.h (+3 -3)
  41. arch/s390/include/uapi/asm/unistd.h (+2 -1)
  42. arch/s390/kernel/compat_wrapper.c (+1 -0)
  43. arch/s390/kernel/crash_dump.c (+0 -2)
  44. arch/s390/kernel/debug.c (+1 -1)
  45. arch/s390/kernel/dumpstack.c (+4 -5)
  46. arch/s390/kernel/early.c (+4 -4)
  47. arch/s390/kernel/ftrace.c (+1 -1)
  48. arch/s390/kernel/ipl.c (+2 -2)
  49. arch/s390/kernel/kprobes.c (+8 -8)
  50. arch/s390/kernel/perf_event.c (+5 -7)
  51. arch/s390/kernel/process.c (+6 -6)
  52. arch/s390/kernel/ptrace.c (+2 -4)
  53. arch/s390/kernel/setup.c (+6 -10)
  54. arch/s390/kernel/signal.c (+6 -7)
  55. arch/s390/kernel/smp.c (+0 -2)
  56. arch/s390/kernel/stacktrace.c (+5 -6)
  57. arch/s390/kernel/syscalls.S (+1 -0)
  58. arch/s390/kernel/traps.c (+5 -6)
  59. arch/s390/kvm/Kconfig (+1 -0)
  60. arch/s390/kvm/Makefile (+1 -1)
  61. arch/s390/kvm/guestdbg.c (+2 -2)
  62. arch/s390/kvm/kvm-s390.c (+42 -79)
  63. arch/s390/mm/fault.c (+3 -3)
  64. arch/s390/mm/init.c (+1 -1)
  65. arch/s390/mm/mmap.c (+6 -6)
  66. arch/s390/mm/pgtable.c (+1 -1)
  67. arch/s390/numa/numa.c (+2 -4)
  68. arch/s390/oprofile/backtrace.c (+4 -5)
  69. arch/s390/pci/pci.c (+31 -30)
  70. arch/s390/pci/pci_event.c (+5 -0)
  71. arch/sh/include/asm/barrier.h (+0 -1)
  72. arch/x86/Kconfig (+1 -2)
  73. arch/x86/include/asm/irq.h (+3 -2)
  74. arch/x86/include/asm/pgtable_types.h (+2 -4)
  75. arch/x86/kernel/apic/io_apic.c (+5 -1)
  76. arch/x86/kernel/apic/vector.c (+145 -76)
  77. arch/x86/kernel/apic/x2apic_uv_x.c (+4 -1)
  78. arch/x86/kernel/cpu/perf_event_intel.c (+3 -4)
  79. arch/x86/kernel/cpu/perf_event_intel_uncore.c (+3 -0)
  80. arch/x86/kernel/cpu/perf_event_intel_uncore.h (+1 -0)
  81. arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c (+20 -0)
  82. arch/x86/kernel/head64.c (+8 -0)
  83. arch/x86/kernel/irq.c (+10 -1)
  84. arch/x86/mm/pageattr.c (+2 -2)
  85. arch/x86/platform/efi/quirks.c (+13 -4)
  86. arch/x86/platform/intel-mid/intel-mid.c (+3 -5)
  87. arch/x86/platform/intel-quark/imr.c (+13 -5)
  88. block/blk-merge.c (+19 -7)
  89. drivers/acpi/video_detect.c (+0 -8)
  90. drivers/base/platform-msi.c (+2 -0)
  91. drivers/base/platform.c (+9 -4)
  92. drivers/base/power/domain.c (+20 -17)
  93. drivers/clocksource/Kconfig (+12 -0)
  94. drivers/clocksource/tcb_clksrc.c (+2 -1)
  95. drivers/cpufreq/cpufreq-dt.c (+7 -8)
  96. drivers/cpufreq/cpufreq.c (+3 -3)
  97. drivers/cpufreq/cpufreq_governor.c (+8 -3)
  98. drivers/cpufreq/pxa2xx-cpufreq.c (+1 -1)
  99. drivers/cpuidle/coupled.c (+0 -1)
  100. drivers/cpuidle/cpuidle.c (+1 -1)

+ 1 - 0
.mailmap

@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>

+ 1 - 1
Documentation/Intel-IOMMU.txt

@@ -3,7 +3,7 @@ Linux IOMMU Support

 The architecture spec can be obtained from the below location.

-http://www.intel.com/technology/virtualization/
+http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf

 This guide gives a quick cheat sheet for some basic understanding.


+ 50 - 0
Documentation/devicetree/bindings/mfd/tps65912.txt

@@ -0,0 +1,50 @@
+* TPS65912 Power Management Integrated Circuit bindings
+
+Required properties:
+ - compatible		: Should be "ti,tps65912".
+ - reg			: Slave address or chip select number (I2C / SPI).
+ - interrupt-parent	: The parent interrupt controller.
+ - interrupts		: The interrupt line the device is connected to.
+ - interrupt-controller	: Marks the device node as an interrupt controller.
+ - #interrupt-cells	: The number of cells to describe an IRQ, should be 2.
+			    The first cell is the IRQ number.
+			    The second cell is the flags, encoded as trigger
+			    masks from ../interrupt-controller/interrupts.txt.
+ - gpio-controller	: Marks the device node as a GPIO Controller.
+ - #gpio-cells		: Should be two.  The first cell is the pin number and
+			    the second cell is used to specify flags.
+			    See ../gpio/gpio.txt for more information.
+ - regulators		: List of child nodes that specify the regulator
+			    initialization data. Child nodes must be named
+			    after their hardware counterparts: dcdc[1-4] and
+			    ldo[1-10]. Each child node is defined using the
+			    standard binding for regulators.
+
+Example:
+
+	pmic: tps65912@2d {
+		compatible = "ti,tps65912";
+		reg = <0x2d>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <28 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		regulators {
+			dcdc1 {
+				regulator-name = "vdd_core";
+				regulator-min-microvolt = <912000>;
+				regulator-max-microvolt = <1144000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo1 {
+				regulator-name = "ldo1";
+				regulator-min-microvolt = <1900000>;
+				regulator-max-microvolt = <1900000>;
+			};
+		};
+	};

+ 1 - 1
Documentation/virtual/kvm/api.txt

@@ -3025,7 +3025,7 @@ len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0
 and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
 which is the maximum number of possibly pending cpu-local interrupts.

-4.90 KVM_SMI
+4.96 KVM_SMI

 Capability: KVM_CAP_X86_SMM
 Architectures: x86

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Blurry Fish Butt

 # *DOCUMENTATION*

+ 9 - 0
arch/arm/boot/compressed/Makefile

@@ -106,6 +106,15 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif

+# -fstack-protector-strong triggers protection checks in this code,
+# but it is being used too early to link to meaningful stack_chk logic.
+nossp_flags := $(call cc-option, -fno-stack-protector)
+CFLAGS_atags_to_fdt.o := $(nossp_flags)
+CFLAGS_fdt.o := $(nossp_flags)
+CFLAGS_fdt_ro.o := $(nossp_flags)
+CFLAGS_fdt_rw.o := $(nossp_flags)
+CFLAGS_fdt_wip.o := $(nossp_flags)
+
 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE


+ 1 - 0
arch/arm/include/uapi/asm/unistd.h

@@ -417,6 +417,7 @@
 #define __NR_userfaultfd		(__NR_SYSCALL_BASE+388)
 #define __NR_membarrier			(__NR_SYSCALL_BASE+389)
 #define __NR_mlock2			(__NR_SYSCALL_BASE+390)
+#define __NR_copy_file_range		(__NR_SYSCALL_BASE+391)

 /*
  * The following SWIs are ARM private.
  * The following SWIs are ARM private.

+ 1 - 0
arch/arm/kernel/calls.S

@@ -400,6 +400,7 @@
 		CALL(sys_userfaultfd)
 		CALL(sys_membarrier)
 		CALL(sys_mlock2)
+		CALL(sys_copy_file_range)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
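
Note: this hunk, together with the powerpc and s390 tables further down, wires up the new copy_file_range(2) syscall. As a hedged illustration of what is being enabled (not part of the commit): a user-space caller at the time had to go through syscall(2) because glibc had no wrapper yet; 391 is the ARM EABI number defined above, and the file names are placeholders.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int in = open("in.txt", O_RDONLY);
		int out = open("out.txt", O_WRONLY | O_CREAT, 0644);

		if (in < 0 || out < 0)
			return 1;
		/* ARM EABI __NR_copy_file_range = 391 (see above); NULL
		 * offsets mean "use and update the file positions". */
		long n = syscall(391, in, NULL, out, NULL, 4096, 0);

		if (n < 0)
			perror("copy_file_range");
		else
			printf("copied %ld bytes\n", n);
		return 0;
	}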

+ 2 - 0
arch/arm64/Makefile

@@ -27,6 +27,8 @@ $(warning LSE atomics not supported by binutils)
 endif

 KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
+KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS	+= $(call cc-option, -mpc-relative-literal-loads)
 KBUILD_AFLAGS	+= $(lseinstr)

 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)

+ 25 - 17
arch/arm64/configs/defconfig

@@ -16,7 +16,6 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
 CONFIG_CGROUP_HUGETLB=y
 # CONFIG_UTS_NS is not set
 # CONFIG_IPC_NS is not set
@@ -37,15 +36,13 @@ CONFIG_ARCH_EXYNOS7=y
 CONFIG_ARCH_LAYERSCAPE=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SEATTLE=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_R8A7795=y
 CONFIG_ARCH_STRATIX10=y
 CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_132_SOC=y
-CONFIG_ARCH_TEGRA_210_SOC=y
-CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SPRD=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_UNIPHIER=y
@@ -54,14 +51,19 @@ CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
 CONFIG_PCI_MSI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_RCAR_GEN2_PCIE=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
-CONFIG_SMP=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
 CONFIG_SCHED_MC=y
 CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_XEN=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -100,7 +102,11 @@ CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
 CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
 CONFIG_SKY2=y
 CONFIG_RAVB=y
 CONFIG_SMC91X=y
@@ -117,25 +123,23 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_SAMSUNG=y
-CONFIG_SERIAL_SAMSUNG_UARTS_4=y
-CONFIG_SERIAL_SAMSUNG_UARTS=4
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=11
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
 CONFIG_I2C_QUP=y
+CONFIG_I2C_UNIPHIER_F=y
 CONFIG_I2C_RCAR=y
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
@@ -176,8 +180,6 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_MMC_SPI=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
-CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -187,28 +189,33 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_XGENE=y
 CONFIG_DMADEVICES=y
-CONFIG_RCAR_DMAC=y
 CONFIG_QCOM_BAM_DMA=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
 CONFIG_COMMON_CLK_CS2000_CP=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8916=y
 CONFIG_HWSPINLOCK_QCOM=y
-# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ARM_SMMU=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD=y
 CONFIG_QCOM_SMD_RPM=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_HISILICON_IRQ_MBIGEN=y
 CONFIG_PHY_XGENE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
@@ -239,6 +246,7 @@ CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_FTRACE is not set
 CONFIG_MEMTEST=y
 CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y

+ 10 - 11
arch/arm64/include/asm/pgtable.h

@@ -67,11 +67,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

-#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))

 #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -81,7 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);

 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)

@@ -153,6 +153,7 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
+#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))

 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -163,8 +164,6 @@ extern struct page *empty_zero_page;
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 #define pte_valid_not_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
@@ -278,13 +277,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-	if (pte_valid_user(pte)) {
-		if (!pte_special(pte) && pte_exec(pte))
-			__sync_icache_dcache(pte, addr);
+	if (pte_valid(pte)) {
 		if (pte_sw_dirty(pte) && pte_write(pte))
 			pte_val(pte) &= ~PTE_RDONLY;
 		else
 			pte_val(pte) |= PTE_RDONLY;
+		if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
+			__sync_icache_dcache(pte, addr);
 	}

 	/*

+ 5 - 0
arch/arm64/kernel/head.S

@@ -514,9 +514,14 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
 #endif

 	/* EL2 debug */
+	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	x0, x0, #8, #4
+	cmp	x0, #1
+	b.lt	4f				// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
 	msr	mdcr_el2, x0			// all PMU counters from EL1
+4:

 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr

+ 25 - 15
arch/arm64/kernel/image.h

@@ -64,6 +64,16 @@

 #ifdef CONFIG_EFI

+/*
+ * Prevent the symbol aliases below from being emitted into the kallsyms
+ * table, by forcing them to be absolute symbols (which are conveniently
+ * ignored by scripts/kallsyms) rather than section relative symbols.
+ * The distinction is only relevant for partial linking, and only for symbols
+ * that are defined within a section declaration (which is not the case for
+ * the definitions below) so the resulting values will be identical.
+ */
+#define KALLSYMS_HIDE(sym)	ABSOLUTE(sym)
+
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
  * isolate it from the kernel proper. The following symbols are legally
@@ -73,25 +83,25 @@
  * linked at. The routines below are all implemented in assembler in a
  * position independent manner
  */
-__efistub_memcmp		= __pi_memcmp;
-__efistub_memchr		= __pi_memchr;
-__efistub_memcpy		= __pi_memcpy;
-__efistub_memmove		= __pi_memmove;
-__efistub_memset		= __pi_memset;
-__efistub_strlen		= __pi_strlen;
-__efistub_strcmp		= __pi_strcmp;
-__efistub_strncmp		= __pi_strncmp;
-__efistub___flush_dcache_area	= __pi___flush_dcache_area;
+__efistub_memcmp		= KALLSYMS_HIDE(__pi_memcmp);
+__efistub_memchr		= KALLSYMS_HIDE(__pi_memchr);
+__efistub_memcpy		= KALLSYMS_HIDE(__pi_memcpy);
+__efistub_memmove		= KALLSYMS_HIDE(__pi_memmove);
+__efistub_memset		= KALLSYMS_HIDE(__pi_memset);
+__efistub_strlen		= KALLSYMS_HIDE(__pi_strlen);
+__efistub_strcmp		= KALLSYMS_HIDE(__pi_strcmp);
+__efistub_strncmp		= KALLSYMS_HIDE(__pi_strncmp);
+__efistub___flush_dcache_area	= KALLSYMS_HIDE(__pi___flush_dcache_area);

 #ifdef CONFIG_KASAN
-__efistub___memcpy		= __pi_memcpy;
-__efistub___memmove		= __pi_memmove;
-__efistub___memset		= __pi_memset;
+__efistub___memcpy		= KALLSYMS_HIDE(__pi_memcpy);
+__efistub___memmove		= KALLSYMS_HIDE(__pi_memmove);
+__efistub___memset		= KALLSYMS_HIDE(__pi_memset);
 #endif

-__efistub__text			= _text;
-__efistub__end			= _end;
-__efistub__edata		= _edata;
+__efistub__text			= KALLSYMS_HIDE(_text);
+__efistub__end			= KALLSYMS_HIDE(_end);
+__efistub__edata		= KALLSYMS_HIDE(_edata);

 #endif


+ 1 - 1
arch/arm64/mm/dump.c

@@ -46,7 +46,7 @@ enum address_markers_idx {
 	PCI_START_NR,
 	PCI_END_NR,
 	MODULES_START_NR,
-	MODUELS_END_NR,
+	MODULES_END_NR,
 	KERNEL_SPACE_NR,
 };


+ 9 - 0
arch/arm64/mm/kasan_init.c

@@ -120,6 +120,7 @@ static void __init cpu_set_ttbr1(unsigned long ttbr1)
 void __init kasan_init(void)
 {
 	struct memblock_region *reg;
+	int i;

 	/*
 	 * We are going to perform proper setup of shadow memory.
@@ -155,6 +156,14 @@ void __init kasan_init(void)
 				pfn_to_nid(virt_to_pfn(start)));
 	}

+	/*
+	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
+	 * should make sure that it maps the zero page read-only.
+	 */
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(&kasan_zero_pte[i],
+			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+
 	memset(kasan_zero_page, 0, PAGE_SIZE);
 	cpu_set_ttbr1(__pa(swapper_pg_dir));
 	flush_tlb_all();

+ 3 - 0
arch/arm64/mm/pageattr.c

@@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages,
 	if (end < MODULES_VADDR || end >= MODULES_END)
 		return -EINVAL;

+	if (!numpages)
+		return 0;
+
 	data.set_mask = set_mask;
 	data.clear_mask = clear_mask;

+ 12 - 0
arch/arm64/mm/proc-macros.S

@@ -84,3 +84,15 @@
 	b.lo	9998b
 	dsb	\domain
 	.endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+	.macro	reset_pmuserenr_el0, tmpreg
+	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	\tmpreg, \tmpreg, #8, #4
+	cmp	\tmpreg, #1			// Skip if no PMU present
+	b.lt	9000f
+	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+9000:
+	.endm
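
Note: this macro and the head.S hunk above perform the same ID_AA64DFR0_EL1.PMUVer check (bits [11:8]). Because sbfx sign-extends the field, the value 0xf (IMPLEMENTATION DEFINED) reads back as -1 and fails the "cmp #1 / b.lt" test just like 0 (no PMU). A minimal C model of that check, for illustration only (the helper name is ours, not from the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* Models "sbfx x0, x0, #8, #4": signed extract of bits [11:8]. */
	static bool cpu_has_pmuv3(uint64_t dfr0)
	{
		int64_t pmuver = (int64_t)(dfr0 << 52) >> 60;

		return pmuver >= 1; /* 0 = none, -1 (0xf) = IMPDEF: both skipped */
	}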

+ 2 - 2
arch/arm64/mm/proc.S

@@ -117,7 +117,7 @@ ENTRY(cpu_do_resume)
 	 */
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	mov	x0, x12
 	dsb	nsh		// Make sure local tlb invalidation completed
 	isb
@@ -154,7 +154,7 @@ ENTRY(__cpu_setup)
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
 	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
 	msr	mdscr_el1, x0			// access to the DCC from EL0
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *

+ 11 - 0
arch/mips/bcm63xx/nvram.c

@@ -19,6 +19,8 @@

 #include <bcm63xx_nvram.h>

+#define BCM63XX_DEFAULT_PSI_SIZE	64
+
 static struct bcm963xx_nvram nvram;
 static int mac_addr_used;

@@ -85,3 +87,12 @@ int bcm63xx_nvram_get_mac_address(u8 *mac)
 	return 0;
 }
 EXPORT_SYMBOL(bcm63xx_nvram_get_mac_address);
+
+int bcm63xx_nvram_get_psi_size(void)
+{
+	if (nvram.psi_size > 0)
+		return nvram.psi_size;
+
+	return BCM63XX_DEFAULT_PSI_SIZE;
+}
+EXPORT_SYMBOL(bcm63xx_nvram_get_psi_size);

+ 2 - 0
arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h

@@ -30,4 +30,6 @@ u8 *bcm63xx_nvram_get_name(void);
  */
 int bcm63xx_nvram_get_mac_address(u8 *mac);

+int bcm63xx_nvram_get_psi_size(void);
+
 #endif /* BCM63XX_NVRAM_H */

+ 3 - 1
arch/powerpc/include/asm/book3s/64/hash.h

@@ -50,7 +50,9 @@
  * set of bits not changed in pmd_modify.
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED | _PAGE_THP_HUGE)
+			 _PAGE_ACCESSED | _PAGE_THP_HUGE | _PAGE_PTE | \
+			 _PAGE_SOFT_DIRTY)
+

 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/book3s/64/hash-64k.h>

+ 0 - 1
arch/powerpc/include/asm/book3s/64/pgtable.h

@@ -223,7 +223,6 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
-#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
 #define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))

+ 1 - 2
arch/powerpc/include/asm/kvm_host.h

@@ -38,8 +38,7 @@

 #define KVM_MAX_VCPUS		NR_CPUS
 #define KVM_MAX_VCORES		NR_CPUS
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS
+#define KVM_USER_MEM_SLOTS	512

 #ifdef CONFIG_KVM_MMIO
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1

+ 1 - 0
arch/powerpc/include/asm/systbl.h

@@ -383,3 +383,4 @@ SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(mlock2)
+SYSCALL(copy_file_range)

+ 1 - 1
arch/powerpc/include/asm/unistd.h

@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>


-#define NR_syscalls		379
+#define NR_syscalls		380

 #define __NR__exit __NR_exit


+ 1 - 0
arch/powerpc/include/uapi/asm/unistd.h

@@ -389,5 +389,6 @@
 #define __NR_userfaultfd	364
 #define __NR_membarrier		365
 #define __NR_mlock2		378
+#define __NR_copy_file_range	379

 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

+ 15 - 18
arch/powerpc/kernel/eeh_pe.c

@@ -883,32 +883,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
 const char *eeh_pe_loc_get(struct eeh_pe *pe)
 {
 	struct pci_bus *bus = eeh_pe_bus_get(pe);
-	struct device_node *dn = pci_bus_to_OF_node(bus);
+	struct device_node *dn;
 	const char *loc = NULL;

-	if (!dn)
-		goto out;
+	while (bus) {
+		dn = pci_bus_to_OF_node(bus);
+		if (!dn) {
+			bus = bus->parent;
+			continue;
+		}

-	/* PHB PE or root PE ? */
-	if (pci_is_root_bus(bus)) {
-		loc = of_get_property(dn, "ibm,loc-code", NULL);
-		if (!loc)
+		if (pci_is_root_bus(bus))
 			loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
+		else
+			loc = of_get_property(dn, "ibm,slot-location-code",
+					      NULL);
+
 		if (loc)
-			goto out;
+			return loc;

-		/* Check the root port */
-		dn = dn->child;
-		if (!dn)
-			goto out;
+		bus = bus->parent;
 	}

-	loc = of_get_property(dn, "ibm,loc-code", NULL);
-	if (!loc)
-		loc = of_get_property(dn, "ibm,slot-location-code", NULL);
-
-out:
-	return loc ? loc : "N/A";
+	return "N/A";
 }

 /**

+ 0 - 28
arch/powerpc/kernel/misc_64.S

@@ -701,31 +701,3 @@ _GLOBAL(kexec_sequence)
 	li	r5,0
 	blr	/* image->start(physid, image->start, 0); */
 #endif /* CONFIG_KEXEC */
-
-#ifdef CONFIG_MODULES
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-
-#ifdef CONFIG_MODVERSIONS
-.weak __crc_TOC.
-.section "___kcrctab+TOC.","a"
-.globl __kcrctab_TOC.
-__kcrctab_TOC.:
-	.llong	__crc_TOC.
-#endif
-
-/*
- * Export a fake .TOC. since both modpost and depmod will complain otherwise.
- * Both modpost and depmod strip the leading . so we do the same here.
- */
-.section "__ksymtab_strings","a"
-__kstrtab_TOC.:
-	.asciz "TOC."
-
-.section "___ksymtab+TOC.","a"
-/* This symbol name is important: it's used by modpost to find exported syms */
-.globl __ksymtab_TOC.
-__ksymtab_TOC.:
-	.llong 0 /* .value */
-	.llong __kstrtab_TOC.
-#endif /* ELFv2 */
-#endif /* MODULES */

+ 9 - 3
arch/powerpc/kernel/module_64.c

@@ -326,7 +326,10 @@ static void dedotify_versions(struct modversion_info *vers,
 		}
 }

-/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
+/*
+ * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
+ * seem to be defined (value set later).
+ */
 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 {
 	unsigned int i;
@@ -334,8 +337,11 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 	for (i = 1; i < numsyms; i++) {
 		if (syms[i].st_shndx == SHN_UNDEF) {
 			char *name = strtab + syms[i].st_name;
-			if (name[0] == '.')
+			if (name[0] == '.') {
+				if (strcmp(name+1, "TOC.") == 0)
+					syms[i].st_shndx = SHN_ABS;
 				memmove(name, name+1, strlen(name));
+			}
 		}
 	}
 }
@@ -351,7 +357,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
 	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);

 	for (i = 1; i < numsyms; i++) {
-		if (syms[i].st_shndx == SHN_UNDEF
+		if (syms[i].st_shndx == SHN_ABS
 		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
 			return &syms[i];
 	}

+ 0 - 3
arch/powerpc/kvm/book3s_64_mmu.c

@@ -377,15 +377,12 @@ no_seg_found:

 static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	u64 esid, esid_1t;
 	int slb_nr;
 	struct kvmppc_slb *slbe;

 	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

-	vcpu_book3s = to_book3s(vcpu);
-
 	esid = GET_ESID(rb);
 	esid_1t = GET_ESID_1T(rb);
 	slb_nr = rb & 0xfff;

+ 18 - 0
arch/powerpc/kvm/book3s_hv.c

@@ -833,6 +833,24 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,

 	vcpu->stat.sum_exits++;

+	/*
+	 * This can happen if an interrupt occurs in the last stages
+	 * of guest entry or the first stages of guest exit (i.e. after
+	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
+	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
+	 * That can happen due to a bug, or due to a machine check
+	 * occurring at just the wrong time.
+	 */
+	if (vcpu->arch.shregs.msr & MSR_HV) {
+		printk(KERN_EMERG "KVM trap in HV mode!\n");
+		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+			vcpu->arch.trap, kvmppc_get_pc(vcpu),
+			vcpu->arch.shregs.msr);
+		kvmppc_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		run->hw.hardware_exit_reason = vcpu->arch.trap;
+		return RESUME_HOST;
+	}
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
 	switch (vcpu->arch.trap) {

+ 3 - 1
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -2153,7 +2153,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

 	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
 2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
-	rlwimi	r5, r4, 1, DAWRX_WT
+	rlwimi	r5, r4, 2, DAWRX_WT
 	clrrdi	r4, r4, 3
 	std	r4, VCPU_DAWR(r3)
 	std	r5, VCPU_DAWRX(r3)
@@ -2404,6 +2404,8 @@ machine_check_realmode:
 	 * guest as machine check causing guest to crash.
 	 */
 	ld	r11, VCPU_MSR(r9)
+	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
+	bne	mc_cont			/* if so, exit to host */
 	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
 	beq	1f			/* Deliver a machine check to guest */
 	ld	r10, VCPU_PC(r9)

+ 10 - 10
arch/powerpc/kvm/powerpc.c

@@ -919,21 +919,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 				r = -ENXIO;
 				break;
 			}
-			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
 			break;
 		case KVM_REG_PPC_VSCR:
 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 				r = -ENXIO;
 				break;
 			}
-			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
 			break;
 		case KVM_REG_PPC_VRSAVE:
-			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-				r = -ENXIO;
-				break;
-			}
-			vcpu->arch.vrsave = set_reg_val(reg->id, val);
+			val = get_reg_val(reg->id, vcpu->arch.vrsave);
 			break;
 #endif /* CONFIG_ALTIVEC */
 		default:
@@ -974,17 +970,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 				r = -ENXIO;
 				break;
 			}
-			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
 			break;
 		case KVM_REG_PPC_VSCR:
 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 				r = -ENXIO;
 				break;
 			}
-			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
 			break;
 		case KVM_REG_PPC_VRSAVE:
-			val = get_reg_val(reg->id, vcpu->arch.vrsave);
+			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+				r = -ENXIO;
+				break;
+			}
+			vcpu->arch.vrsave = set_reg_val(reg->id, val);
 			break;
 #endif /* CONFIG_ALTIVEC */
 		default:

+ 2 - 2
arch/powerpc/mm/mem.c

@@ -560,12 +560,12 @@ subsys_initcall(add_system_ram_resources);
  */
 int devmem_is_allowed(unsigned long pfn)
 {
+	if (page_is_rtas_user_buf(pfn))
+		return 1;
 	if (iomem_is_exclusive(PFN_PHYS(pfn)))
 		return 0;
 	if (!page_is_ram(pfn))
 		return 1;
-	if (page_is_rtas_user_buf(pfn))
-		return 1;
 	return 0;
 }
 #endif /* CONFIG_STRICT_DEVMEM */

+ 1 - 1
arch/powerpc/perf/power8-pmu.c

@@ -816,7 +816,7 @@ static struct power_pmu power8_pmu = {
 	.get_constraint		= power8_get_constraint,
 	.get_alternatives	= power8_get_alternatives,
 	.disable_pmc		= power8_disable_pmc,
-	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
+	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
 	.n_generic		= ARRAY_SIZE(power8_generic_events),
 	.generic_events		= power8_generic_events,
 	.cache_events		= &power8_cache_events,

+ 7 - 2
arch/s390/include/asm/irqflags.h

@@ -8,6 +8,8 @@

 #include <linux/types.h>

+#define ARCH_IRQ_ENABLED	(3UL << (BITS_PER_LONG - 8))
+
 /* store then OR system mask. */
 #define __arch_local_irq_stosm(__or)					\
 ({									\
@@ -54,14 +56,17 @@ static inline notrace void arch_local_irq_enable(void)
 	__arch_local_irq_stosm(0x03);
 }

+/* This only restores external and I/O interrupt state */
 static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
-	__arch_local_irq_ssm(flags);
+	/* only disabled->disabled and disabled->enabled is valid */
+	if (flags & ARCH_IRQ_ENABLED)
+		arch_local_irq_enable();
 }

 static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & (3UL << (BITS_PER_LONG - 8)));
+	return !(flags & ARCH_IRQ_ENABLED);
 }

 static inline notrace bool arch_irqs_disabled(void)
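
Note: the rework above makes arch_local_irq_restore() one-way: a flags value saved while interrupts were enabled re-enables them, and a flags value saved while they were disabled leaves the already-disabled state alone, instead of blindly reloading the full system mask. A hedged sketch of the caller-side pattern this preserves, using the generic kernel helpers (the function name is hypothetical):

	static void guarded_update(void)
	{
		unsigned long flags;

		local_irq_save(flags);     /* disable, remember prior state */
		/* ... critical section with external/I/O interrupts off ... */
		local_irq_restore(flags);  /* re-enable only if previously on */
	}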

+ 0 - 1
arch/s390/include/asm/kvm_host.h

@@ -546,7 +546,6 @@ struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *sie_block;
 	unsigned int      host_acrs[NUM_ACRS];
 	struct fpu	  host_fpregs;
-	struct fpu	  guest_fpregs;
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer    ckc_timer;
 	struct kvm_s390_pgm_info pgm;

+ 9 - 5
arch/s390/include/asm/pci_io.h

@@ -8,10 +8,13 @@
 #include <asm/pci_insn.h>

 /* I/O Map */
-#define ZPCI_IOMAP_MAX_ENTRIES		0x7fff
-#define ZPCI_IOMAP_ADDR_BASE		0x8000000000000000ULL
-#define ZPCI_IOMAP_ADDR_IDX_MASK	0x7fff000000000000ULL
-#define ZPCI_IOMAP_ADDR_OFF_MASK	0x0000ffffffffffffULL
+#define ZPCI_IOMAP_SHIFT		48
+#define ZPCI_IOMAP_ADDR_BASE		0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_OFF_MASK	((1UL << ZPCI_IOMAP_SHIFT) - 1)
+#define ZPCI_IOMAP_MAX_ENTRIES							\
+	((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+#define ZPCI_IOMAP_ADDR_IDX_MASK						\
+	(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)

 struct zpci_iomap_entry {
 	u32 fh;
@@ -21,8 +24,9 @@ struct zpci_iomap_entry {

 extern struct zpci_iomap_entry *zpci_iomap_start;

+#define ZPCI_ADDR(idx) (ZPCI_IOMAP_ADDR_BASE | ((u64) idx << ZPCI_IOMAP_SHIFT))
 #define ZPCI_IDX(addr)								\
-	(((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
+	(((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT)
 #define ZPCI_OFFSET(addr)							\
 	((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)

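Note: with ZPCI_IOMAP_SHIFT = 48, the fake I/O address now splits into bit 63 (ZPCI_IOMAP_ADDR_BASE), a 15-bit entry index in bits 62..48, and a 48-bit offset; that is also why ZPCI_IOMAP_MAX_ENTRIES evaluates to 32768 (2^15). A self-contained C sketch of the same arithmetic, for illustration only (user-space names, 64-bit build assumed):

	#include <assert.h>
	#include <stdint.h>

	#define IOMAP_SHIFT	48
	#define IOMAP_BASE	0x8000000000000000ULL
	#define OFF_MASK	((1ULL << IOMAP_SHIFT) - 1)
	#define IDX_MASK	(~OFF_MASK - IOMAP_BASE)	/* bits 62..48 */

	int main(void)
	{
		uint64_t addr = IOMAP_BASE | (42ULL << IOMAP_SHIFT) | 0x1000;

		assert(((addr & IDX_MASK) >> IOMAP_SHIFT) == 42);  /* ZPCI_IDX */
		assert((addr & OFF_MASK) == 0x1000);           /* ZPCI_OFFSET */
		assert((IDX_MASK >> IOMAP_SHIFT) + 1 == 1ULL << 15);
		return 0;
	}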

+ 2 - 2
arch/s390/include/asm/processor.h

@@ -166,14 +166,14 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
  */
 #define start_thread(regs, new_psw, new_stackp) do {			\
 	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;	\
-	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;			\
+	regs->psw.addr	= new_psw;					\
 	regs->gprs[15]	= new_stackp;					\
 	execve_tail();							\
 } while (0)

 #define start_thread31(regs, new_psw, new_stackp) do {			\
 	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_BA;			\
-	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;			\
+	regs->psw.addr	= new_psw;					\
 	regs->gprs[15]	= new_stackp;					\
 	crst_table_downgrade(current->mm, 1UL << 31);			\
 	execve_tail();							\

+ 3 - 3
arch/s390/include/asm/ptrace.h

@@ -149,7 +149,7 @@ static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
 #define arch_has_block_step()	(1)

 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
-#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
+#define instruction_pointer(regs) ((regs)->psw.addr)
 #define user_stack_pointer(regs)((regs)->gprs[15])
 #define profile_pc(regs) instruction_pointer(regs)

@@ -161,7 +161,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 static inline void instruction_pointer_set(struct pt_regs *regs,
 					   unsigned long val)
 {
-	regs->psw.addr = val | PSW_ADDR_AMODE;
+	regs->psw.addr = val;
 }

 int regs_query_register_offset(const char *name);
@@ -171,7 +171,7 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);

 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
-	return regs->gprs[15] & PSW_ADDR_INSN;
+	return regs->gprs[15];
 }

 #endif /* __ASSEMBLY__ */

+ 2 - 1
arch/s390/include/uapi/asm/unistd.h

@@ -310,7 +310,8 @@
 #define __NR_recvmsg		372
 #define __NR_shutdown		373
 #define __NR_mlock2		374
-#define NR_syscalls 375
+#define __NR_copy_file_range	375
+#define NR_syscalls 376

 /*
  * There are some system calls that are not present on 64 bit, some

+ 1 - 0
arch/s390/kernel/compat_wrapper.c

@@ -177,3 +177,4 @@ COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
 COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
 COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
 COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
+COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);

+ 0 - 2
arch/s390/kernel/crash_dump.c

@@ -59,8 +59,6 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
 	struct save_area *sa;

 	sa = (void *) memblock_alloc(sizeof(*sa), 8);
-	if (!sa)
-		return NULL;
 	if (is_boot_cpu)
 		list_add(&sa->list, &dump_save_areas);
 	else

+ 1 - 1
arch/s390/kernel/debug.c

@@ -1470,7 +1470,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
 		except_str = "*";
 		except_str = "*";
 	else
 	else
 		except_str = "-";
 		except_str = "-";
-	caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
+	caller = (unsigned long) entry->caller;
 	rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p  ",
 	rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p  ",
 		      area, (long long)time_spec.tv_sec,
 		      area, (long long)time_spec.tv_sec,
 		      time_spec.tv_nsec / 1000, level, except_str,
 		      time_spec.tv_nsec / 1000, level, except_str,

+ 4 - 5
arch/s390/kernel/dumpstack.c

@@ -34,22 +34,21 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
 	unsigned long addr;

 	while (1) {
-		sp = sp & PSW_ADDR_INSN;
 		if (sp < low || sp > high - sizeof(*sf))
 			return sp;
 		sf = (struct stack_frame *) sp;
-		addr = sf->gprs[8] & PSW_ADDR_INSN;
+		addr = sf->gprs[8];
 		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
 		/* Follow the backchain. */
 		while (1) {
 			low = sp;
-			sp = sf->back_chain & PSW_ADDR_INSN;
+			sp = sf->back_chain;
 			if (!sp)
 				break;
 			if (sp <= low || sp > high - sizeof(*sf))
 				return sp;
 			sf = (struct stack_frame *) sp;
-			addr = sf->gprs[8] & PSW_ADDR_INSN;
+			addr = sf->gprs[8];
 			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
 		}
 		/* Zero backchain detected, check for interrupt frame. */
@@ -57,7 +56,7 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
 		if (sp <= low || sp > high - sizeof(*regs))
 			return sp;
 		regs = (struct pt_regs *) sp;
-		addr = regs->psw.addr & PSW_ADDR_INSN;
+		addr = regs->psw.addr;
 		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
 		low = sp;
 		sp = regs->gprs[15];

+ 4 - 4
arch/s390/kernel/early.c

@@ -252,14 +252,14 @@ static void early_pgm_check_handler(void)
 	unsigned long addr;

 	addr = S390_lowcore.program_old_psw.addr;
-	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
+	fixup = search_exception_tables(addr);
 	if (!fixup)
 		disabled_wait(0);
 	/* Disable low address protection before storing into lowcore. */
 	__ctl_store(cr0, 0, 0);
 	cr0_new = cr0 & ~(1UL << 28);
 	__ctl_load(cr0_new, 0, 0);
-	S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
+	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
 	__ctl_load(cr0, 0, 0);
 }

@@ -268,9 +268,9 @@ static noinline __init void setup_lowcore_early(void)
 	psw_t psw;

 	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
-	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
+	psw.addr = (unsigned long) s390_base_ext_handler;
 	S390_lowcore.external_new_psw = psw;
-	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
+	psw.addr = (unsigned long) s390_base_pgm_handler;
 	S390_lowcore.program_new_psw = psw;
 	s390_base_pgm_handler_fn = early_pgm_check_handler;
 }

+ 1 - 1
arch/s390/kernel/ftrace.c

@@ -203,7 +203,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
 		goto out;
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		goto out;
-	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
+	ip -= MCOUNT_INSN_SIZE;
 	trace.func = ip;
 	trace.depth = current->curr_ret_stack + 1;
 	/* Only trace if the calling function expects to. */

+ 2 - 2
arch/s390/kernel/ipl.c

@@ -2057,12 +2057,12 @@ void s390_reset_system(void)
 	/* Set new machine check handler */
 	S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
 	S390_lowcore.mcck_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
+		(unsigned long) s390_base_mcck_handler;

 	/* Set new program check handler */
 	S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
 	S390_lowcore.program_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
+		(unsigned long) s390_base_pgm_handler;

 	/*
 	 * Clear subchannel ID and number to signal new kernel that no CCW or

+ 8 - 8
arch/s390/kernel/kprobes.c

@@ -226,7 +226,7 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb,
 	__ctl_load(per_kprobe, 9, 11);
 	regs->psw.mask |= PSW_MASK_PER;
 	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
-	regs->psw.addr = ip | PSW_ADDR_AMODE;
+	regs->psw.addr = ip;
 }
 NOKPROBE_SYMBOL(enable_singlestep);

@@ -238,7 +238,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb,
 	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
 	regs->psw.mask &= ~PSW_MASK_PER;
 	regs->psw.mask |= kcb->kprobe_saved_imask;
-	regs->psw.addr = ip | PSW_ADDR_AMODE;
+	regs->psw.addr = ip;
 }
 NOKPROBE_SYMBOL(disable_singlestep);

@@ -310,7 +310,7 @@ static int kprobe_handler(struct pt_regs *regs)
 	 */
 	preempt_disable();
 	kcb = get_kprobe_ctlblk();
-	p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));
+	p = get_kprobe((void *)(regs->psw.addr - 2));

 	if (p) {
 		if (kprobe_running()) {
@@ -460,7 +460,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 			break;
 	}

-	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
+	regs->psw.addr = orig_ret_address;

 	pop_kprobe(get_kprobe_ctlblk());
 	kretprobe_hash_unlock(current, &flags);
@@ -490,7 +490,7 @@ NOKPROBE_SYMBOL(trampoline_probe_handler);
 static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
+	unsigned long ip = regs->psw.addr;
 	int fixup = probe_get_fixup_type(p->ainsn.insn);

 	/* Check if the kprobes location is an enabled ftrace caller */
@@ -605,9 +605,9 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 		 * In case the user-specified fault handler returned
 		 * zero, try to fix up.
 		 */
-		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+		entry = search_exception_tables(regs->psw.addr);
 		if (entry) {
-			regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
+			regs->psw.addr = extable_fixup(entry);
 			return 1;
 		}

@@ -683,7 +683,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

 	/* setup return addr to the jprobe handler routine */
-	regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
+	regs->psw.addr = (unsigned long) jp->entry;
 	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

 	/* r15 is the stack pointer */
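The lookup at regs->psw.addr - 2 works because the breakpoint kprobes plants on s390 is a 2-byte instruction, and the PSW has already advanced past it when the program-check handler runs. A hedged sketch (instruction size assumed):

/* Sketch: psw.addr points just past the 2-byte breakpoint, so the
 * probe is registered at psw.addr - 2 (illustration only).
 */
struct kprobe *p = get_kprobe((void *)(regs->psw.addr - 2));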

+ 5 - 7
arch/s390/kernel/perf_event.c

@@ -74,7 +74,7 @@ static unsigned long guest_is_user_mode(struct pt_regs *regs)

 static unsigned long instruction_pointer_guest(struct pt_regs *regs)
 {
-	return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
+	return sie_block(regs)->gpsw.addr;
 }

 unsigned long perf_instruction_pointer(struct pt_regs *regs)
@@ -231,29 +231,27 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry,
 	struct pt_regs *regs;

 	while (1) {
-		sp = sp & PSW_ADDR_INSN;
 		if (sp < low || sp > high - sizeof(*sf))
 			return sp;
 		sf = (struct stack_frame *) sp;
-		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
+		perf_callchain_store(entry, sf->gprs[8]);
 		/* Follow the backchain. */
 		while (1) {
 			low = sp;
-			sp = sf->back_chain & PSW_ADDR_INSN;
+			sp = sf->back_chain;
 			if (!sp)
 				break;
 			if (sp <= low || sp > high - sizeof(*sf))
 				return sp;
 			sf = (struct stack_frame *) sp;
-			perf_callchain_store(entry,
-					     sf->gprs[8] & PSW_ADDR_INSN);
+			perf_callchain_store(entry, sf->gprs[8]);
 		}
 		/* Zero backchain detected, check for interrupt frame. */
 		sp = (unsigned long) (sf + 1);
 		if (sp <= low || sp > high - sizeof(*regs))
 			return sp;
 		regs = (struct pt_regs *) sp;
-		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
+		perf_callchain_store(entry, sf->gprs[8]);
 		low = sp;
 		sp = regs->gprs[15];
 	}

+ 6 - 6
arch/s390/kernel/process.c

@@ -56,10 +56,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 		return 0;
 	low = task_stack_page(tsk);
 	high = (struct stack_frame *) task_pt_regs(tsk);
-	sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN);
+	sf = (struct stack_frame *) tsk->thread.ksp;
 	if (sf <= low || sf > high)
 		return 0;
-	sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+	sf = (struct stack_frame *) sf->back_chain;
 	if (sf <= low || sf > high)
 		return 0;
 	return sf->gprs[8];
@@ -154,7 +154,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 		memset(&frame->childregs, 0, sizeof(struct pt_regs));
 		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
 				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-		frame->childregs.psw.addr = PSW_ADDR_AMODE |
+		frame->childregs.psw.addr =
 				(unsigned long) kernel_thread_starter;
 		frame->childregs.gprs[9] = new_stackp; /* function */
 		frame->childregs.gprs[10] = arg;
@@ -220,14 +220,14 @@ unsigned long get_wchan(struct task_struct *p)
 		return 0;
 	low = task_stack_page(p);
 	high = (struct stack_frame *) task_pt_regs(p);
-	sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
+	sf = (struct stack_frame *) p->thread.ksp;
 	if (sf <= low || sf > high)
 		return 0;
 	for (count = 0; count < 16; count++) {
-		sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+		sf = (struct stack_frame *) sf->back_chain;
 		if (sf <= low || sf > high)
 			return 0;
-		return_address = sf->gprs[8] & PSW_ADDR_INSN;
+		return_address = sf->gprs[8];
 		if (!in_sched_functions(return_address))
 			return return_address;
 	}
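thread_saved_pc() and get_wchan() both rely on the s390 ABI backchain: slot 0 of each frame points at the caller's frame, and the saved r14 (the return address) lands in the gprs[8] slot. A sketch of the frame layout these walkers assume (field names and offsets assumed from arch/s390/include/asm/processor.h):

/* Sketch of the frame layout traversed above: */
struct stack_frame {
	unsigned long back_chain;	/* 0 ends the chain */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* saved r6..r15; gprs[8] is r14 */
};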

+ 2 - 4
arch/s390/kernel/ptrace.c

@@ -84,7 +84,7 @@ void update_cr_regs(struct task_struct *task)
 		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
 			new.control |= PER_EVENT_IFETCH;
 		new.start = 0;
-		new.end = PSW_ADDR_INSN;
+		new.end = -1UL;
 	}

 	/* Take care of the PER enablement bit in the PSW. */
@@ -148,7 +148,7 @@ static inline unsigned long __peek_user_per(struct task_struct *child,
 	else if (addr == (addr_t) &dummy->cr11)
 		/* End address of the active per set. */
 		return test_thread_flag(TIF_SINGLE_STEP) ?
-			PSW_ADDR_INSN : child->thread.per_user.end;
+			-1UL : child->thread.per_user.end;
 	else if (addr == (addr_t) &dummy->bits)
 		/* Single-step bit. */
 		return test_thread_flag(TIF_SINGLE_STEP) ?
@@ -495,8 +495,6 @@ long arch_ptrace(struct task_struct *child, long request,
 		}
 		return 0;
 	default:
-		/* Removing high order bit from addr (only for 31 bit). */
-		addr &= PSW_ADDR_INSN;
 		return ptrace_request(child, request, addr, data);
 	}
 }

+ 6 - 10
arch/s390/kernel/setup.c

@@ -301,25 +301,21 @@ static void __init setup_lowcore(void)
 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
 	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
-	lc->restart_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 	lc->external_new_psw.mask = PSW_KERNEL_BITS |
 		PSW_MASK_DAT | PSW_MASK_MCHECK;
-	lc->external_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
+	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
 	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
 		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
+	lc->svc_new_psw.addr = (unsigned long) system_call;
 	lc->program_new_psw.mask = PSW_KERNEL_BITS |
 		PSW_MASK_DAT | PSW_MASK_MCHECK;
-	lc->program_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
+	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
 	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
-	lc->mcck_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
+	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
 	lc->io_new_psw.mask = PSW_KERNEL_BITS |
 		PSW_MASK_DAT | PSW_MASK_MCHECK;
-	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
+	lc->io_new_psw.addr = (unsigned long) io_int_handler;
 	lc->clock_comparator = -1ULL;
 	lc->kernel_stack = ((unsigned long) &init_thread_union)
 		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
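Each lowcore entry written above is a full PSW: a mask word carrying the PSW_KERNEL_BITS / PSW_MASK_* flags plus a plain 64-bit entry-point address, which is why the PSW_ADDR_AMODE tag can simply disappear. Sketch of the type involved (as declared in arch/s390/include/uapi/asm/ptrace.h):

typedef struct {
	unsigned long mask;	/* machine state: DAT, I/O, ext, mcheck ... */
	unsigned long addr;	/* handler entry point, untagged */
} psw_t;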

+ 6 - 7
arch/s390/kernel/signal.c

@@ -331,13 +331,13 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
+		restorer = (unsigned long) ka->sa.sa_restorer;
 	} else {
 		/* Signal frame without vector registers are short ! */
 		__u16 __user *svc = (void __user *) frame + frame_size - 2;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
 			return -EFAULT;
-		restorer = (unsigned long) svc | PSW_ADDR_AMODE;
+		restorer = (unsigned long) svc;
 	}

 	/* Set up registers for signal handler */
@@ -347,7 +347,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
 		(PSW_USER_BITS & PSW_MASK_ASC) |
 		(regs->psw.mask & ~PSW_MASK_ASC);
-	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+	regs->psw.addr = (unsigned long) ka->sa.sa_handler;

 	regs->gprs[2] = sig;
 	regs->gprs[3] = (unsigned long) &frame->sc;
@@ -394,13 +394,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
-		restorer = (unsigned long)
-			ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE;
+		restorer = (unsigned long) ksig->ka.sa.sa_restorer;
 	} else {
 		__u16 __user *svc = &frame->svc_insn;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
 			return -EFAULT;
-		restorer = (unsigned long) svc | PSW_ADDR_AMODE;
+		restorer = (unsigned long) svc;
 	}

 	/* Create siginfo on the signal stack */
@@ -426,7 +425,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
 		(PSW_USER_BITS & PSW_MASK_ASC) |
 		(regs->psw.mask & ~PSW_MASK_ASC);
-	regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler;
+	regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler;

 	regs->gprs[2] = ksig->sig;
 	regs->gprs[3] = (unsigned long) &frame->info;
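When userspace supplies no SA_RESTORER, both frame builders store a two-byte svc instruction on the stack and point the return address at it. A hedged sketch of the encoding (opcode value assumed to be 0x0a00):

/* Sketch: high byte is the svc opcode, low byte the syscall number,
 * so this halfword is a complete "svc __NR_rt_sigreturn" instruction.
 */
__u16 insn = S390_SYSCALL_OPCODE | __NR_rt_sigreturn;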

+ 0 - 2
arch/s390/kernel/smp.c

@@ -623,8 +623,6 @@ void __init smp_save_dump_cpus(void)
 		return;
 	/* Allocate a page as dumping area for the store status sigps */
 	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
-	if (!page)
-		panic("could not allocate memory for save area\n");
 	/* Set multi-threading state to the previous system. */
 	pcpu_set_smt(sclp.mtid_prev);
 	boot_cpu_addr = stap();

+ 5 - 6
arch/s390/kernel/stacktrace.c

@@ -21,12 +21,11 @@ static unsigned long save_context_stack(struct stack_trace *trace,
 	unsigned long addr;

 	while(1) {
-		sp &= PSW_ADDR_INSN;
 		if (sp < low || sp > high)
 			return sp;
 		sf = (struct stack_frame *)sp;
 		while(1) {
-			addr = sf->gprs[8] & PSW_ADDR_INSN;
+			addr = sf->gprs[8];
 			if (!trace->skip)
 				trace->entries[trace->nr_entries++] = addr;
 			else
@@ -34,7 +33,7 @@ static unsigned long save_context_stack(struct stack_trace *trace,
 			if (trace->nr_entries >= trace->max_entries)
 				return sp;
 			low = sp;
-			sp = sf->back_chain & PSW_ADDR_INSN;
+			sp = sf->back_chain;
 			if (!sp)
 				break;
 			if (sp <= low || sp > high - sizeof(*sf))
@@ -46,7 +45,7 @@ static unsigned long save_context_stack(struct stack_trace *trace,
 		if (sp <= low || sp > high - sizeof(*regs))
 			return sp;
 		regs = (struct pt_regs *)sp;
-		addr = regs->psw.addr & PSW_ADDR_INSN;
+		addr = regs->psw.addr;
 		if (savesched || !in_sched_functions(addr)) {
 			if (!trace->skip)
 				trace->entries[trace->nr_entries++] = addr;
@@ -65,7 +64,7 @@ void save_stack_trace(struct stack_trace *trace)
 	register unsigned long sp asm ("15");
 	unsigned long orig_sp, new_sp;

-	orig_sp = sp & PSW_ADDR_INSN;
+	orig_sp = sp;
 	new_sp = save_context_stack(trace, orig_sp,
 				    S390_lowcore.panic_stack - PAGE_SIZE,
 				    S390_lowcore.panic_stack, 1);
@@ -86,7 +85,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 	unsigned long sp, low, high;

-	sp = tsk->thread.ksp & PSW_ADDR_INSN;
+	sp = tsk->thread.ksp;
 	low = (unsigned long) task_stack_page(tsk);
 	high = (unsigned long) task_pt_regs(tsk);
 	save_context_stack(trace, sp, low, high, 0);

+ 1 - 0
arch/s390/kernel/syscalls.S

@@ -383,3 +383,4 @@ SYSCALL(sys_recvfrom,compat_sys_recvfrom)
 SYSCALL(sys_recvmsg,compat_sys_recvmsg)
 SYSCALL(sys_shutdown,sys_shutdown)
 SYSCALL(sys_mlock2,compat_sys_mlock2)
+SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
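The new final slot wires up copy_file_range as syscall 375 (per the added comment), with a compat wrapper for 31-bit callers. A hedged userspace sketch, invoking it by number since contemporary C libraries had no wrapper yet; the file names are placeholders:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* "in.dat" / "out.dat" are hypothetical paths for this sketch */
	int fd_in = open("in.dat", O_RDONLY);
	int fd_out = open("out.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	long n;

	if (fd_in < 0 || fd_out < 0)
		return 1;
	/* 375: s390 number per the table comment; args follow the generic
	 * copy_file_range(fd_in, *off_in, fd_out, *off_out, len, flags). */
	n = syscall(375, fd_in, NULL, fd_out, NULL, 65536, 0);
	if (n < 0)
		perror("copy_file_range");
	else
		printf("copied %ld bytes\n", n);
	return 0;
}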

+ 5 - 6
arch/s390/kernel/traps.c

@@ -32,8 +32,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
 		address = *(unsigned long *)(current->thread.trap_tdb + 24);
 	else
 		address = regs->psw.addr;
-	return (void __user *)
-		((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
+	return (void __user *) (address - (regs->int_code >> 16));
 }

 static inline void report_user_fault(struct pt_regs *regs, int signr)
@@ -46,7 +45,7 @@ static inline void report_user_fault(struct pt_regs *regs, int signr)
 		return;
 	printk("User process fault: interruption code %04x ilc:%d ",
 	       regs->int_code & 0xffff, regs->int_code >> 17);
-	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
+	print_vma_addr("in ", regs->psw.addr);
 	printk("\n");
 	show_regs(regs);
 }
@@ -69,13 +68,13 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
 		report_user_fault(regs, si_signo);
 	} else {
 		const struct exception_table_entry *fixup;
-		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+		fixup = search_exception_tables(regs->psw.addr);
 		if (fixup)
-			regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
+			regs->psw.addr = extable_fixup(fixup);
 		else {
 			enum bug_trap_type btt;

-			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
+			btt = report_bug(regs->psw.addr, regs);
 			if (btt == BUG_TRAP_TYPE_WARN)
 				return;
 			die(regs, str);

+ 1 - 0
arch/s390/kvm/Kconfig

@@ -29,6 +29,7 @@ config KVM
 	select HAVE_KVM_IRQFD
 	select HAVE_KVM_IRQ_ROUTING
 	select SRCU
+	select KVM_VFIO
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work

+ 1 - 1
arch/s390/kvm/Makefile

@@ -7,7 +7,7 @@
 # as published by the Free Software Foundation.

 KVM := ../../../virt/kvm
-common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o  $(KVM)/async_pf.o $(KVM)/irqchip.o
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o  $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o

 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm

+ 2 - 2
arch/s390/kvm/guestdbg.c

@@ -116,7 +116,7 @@ static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
 	if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
 		*cr9 &= ~PER_CONTROL_ALTERATION;
 		*cr10 = 0;
-		*cr11 = PSW_ADDR_INSN;
+		*cr11 = -1UL;
 	} else {
 		*cr9 &= ~PER_CONTROL_ALTERATION;
 		*cr9 |= PER_EVENT_STORE;
@@ -159,7 +159,7 @@ void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
 		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
 		vcpu->arch.sie_block->gcr[10] = 0;
-		vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN;
+		vcpu->arch.sie_block->gcr[11] = -1UL;
 	}

 	if (guestdbg_hw_bp_enabled(vcpu)) {
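Here, too, -1UL is a drop-in for PSW_ADDR_INSN: a PER control range runs from cr10 to cr11 inclusive, so a start of 0 with an end of -1UL means "match the whole address space", which is what the instruction-fetch single-step setup wants. Sketch:

/* Sketch: PER events fire for addresses in [cr10, cr11]. */
*cr10 = 0;	/* range start: first address */
*cr11 = -1UL;	/* range end: last address   */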

+ 42 - 79
arch/s390/kvm/kvm-s390.c

@@ -1423,44 +1423,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	return 0;
 }

-/*
- * Backs up the current FP/VX register save area on a particular
- * destination.  Used to switch between different register save
- * areas.
- */
-static inline void save_fpu_to(struct fpu *dst)
-{
-	dst->fpc = current->thread.fpu.fpc;
-	dst->regs = current->thread.fpu.regs;
-}
-
-/*
- * Switches the FP/VX register save area from which to lazy
- * restore register contents.
- */
-static inline void load_fpu_from(struct fpu *from)
-{
-	current->thread.fpu.fpc = from->fpc;
-	current->thread.fpu.regs = from->regs;
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	/* Save host register state */
 	save_fpu_regs();
-	save_fpu_to(&vcpu->arch.host_fpregs);
-
-	if (test_kvm_facility(vcpu->kvm, 129)) {
-		current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
-		/*
-		 * Use the register save area in the SIE-control block
-		 * for register restore and save in kvm_arch_vcpu_put()
-		 */
-		current->thread.fpu.vxrs =
-			(__vector128 *)&vcpu->run->s.regs.vrs;
-	} else
-		load_fpu_from(&vcpu->arch.guest_fpregs);
+	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

+	/* Depending on MACHINE_HAS_VX, data stored to vrs either
+	 * has vector register or floating point register format.
+	 */
+	current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
 	if (test_fp_ctl(current->thread.fpu.fpc))
 		/* User space provided an invalid FPC, let's clear it */
 		current->thread.fpu.fpc = 0;
@@ -1476,19 +1450,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);

+	/* Save guest register state */
 	save_fpu_regs();
+	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

-	if (test_kvm_facility(vcpu->kvm, 129))
-		/*
-		 * kvm_arch_vcpu_load() set up the register save area to
-		 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
-		 * are already saved.  Only the floating-point control must be
-		 * copied.
-		 */
-		vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-	else
-		save_fpu_to(&vcpu->arch.guest_fpregs);
-	load_fpu_from(&vcpu->arch.host_fpregs);
+	/* Restore host register state */
+	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_access_regs(vcpu->arch.host_acrs);
@@ -1506,8 +1474,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
-	vcpu->arch.guest_fpregs.fpc = 0;
-	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+	/* make sure the new fpc will be lazily loaded */
+	save_fpu_regs();
+	current->thread.fpu.fpc = 0;
 	vcpu->arch.sie_block->gbea = 1;
 	vcpu->arch.sie_block->pp = 0;
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@@ -1648,17 +1617,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

-	/*
-	 * Allocate a save area for floating-point registers.  If the vector
-	 * extension is available, register contents are saved in the SIE
-	 * control block.  The allocated save area is still required in
-	 * particular places, for example, in kvm_s390_vcpu_store_status().
-	 */
-	vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
-					       GFP_KERNEL);
-	if (!vcpu->arch.guest_fpregs.fprs)
-		goto out_free_sie_block;
-
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
 		goto out_free_sie_block;
@@ -1879,19 +1837,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,

 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
+	/* make sure the new values will be lazily loaded */
+	save_fpu_regs();
 	if (test_fp_ctl(fpu->fpc))
 		return -EINVAL;
-	memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-	save_fpu_regs();
-	load_fpu_from(&vcpu->arch.guest_fpregs);
+	current->thread.fpu.fpc = fpu->fpc;
+	if (MACHINE_HAS_VX)
+		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+	else
+		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	return 0;
 }

 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
-	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+	/* make sure we have the latest values */
+	save_fpu_regs();
+	if (MACHINE_HAS_VX)
+		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+	else
+		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+	fpu->fpc = current->thread.fpu.fpc;
 	return 0;
 }

@@ -2396,6 +2362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 {
 	unsigned char archmode = 1;
+	freg_t fprs[NUM_FPRS];
 	unsigned int px;
 	u64 clkcomp;
 	int rc;
@@ -2411,8 +2378,16 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 		gpa = px;
 	} else
 		gpa -= __LC_FPREGS_SAVE_AREA;
-	rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
-			     vcpu->arch.guest_fpregs.fprs, 128);
+
+	/* manually convert vector registers if necessary */
+	if (MACHINE_HAS_VX) {
+		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+				     fprs, 128);
+	} else {
+		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+				     vcpu->run->s.regs.vrs, 128);
+	}
 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
 			      vcpu->run->s.regs.gprs, 128);
 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
@@ -2420,7 +2395,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
 			      &px, 4);
 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
-			      &vcpu->arch.guest_fpregs.fpc, 4);
+			      &vcpu->run->s.regs.fpc, 4);
 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
 			      &vcpu->arch.sie_block->todpr, 4);
 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
@@ -2443,19 +2418,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * it into the save area
 	 */
 	save_fpu_regs();
-	if (test_kvm_facility(vcpu->kvm, 129)) {
-		/*
-		 * If the vector extension is available, the vector registers
-		 * which overlaps with floating-point registers are saved in
-		 * the SIE-control block.  Hence, extract the floating-point
-		 * registers and the FPC value and store them in the
-		 * guest_fpregs structure.
-		 */
-		vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
-		convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
-				 current->thread.fpu.vxrs);
-	} else
-		save_fpu_to(&vcpu->arch.guest_fpregs);
+	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
 	save_access_regs(vcpu->run->s.regs.acrs);

 	return kvm_s390_store_status_unloaded(vcpu, addr);
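The whole guest_fpregs removal leans on the architectural overlap between register files: floating-point register i occupies the leftmost eight bytes of vector register i, so fprs can always be derived from vrs on demand. A hedged sketch of the conversion helper used above (definition assumed):

static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
{
	int i;

	/* each FPR is the high half of the corresponding VR */
	for (i = 0; i < __NUM_FPRS; i++)
		fprs[i] = *(freg_t *)(vxrs + i);
}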

+ 3 - 3
arch/s390/mm/fault.c

@@ -228,7 +228,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
 		return;
 	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
 	       regs->int_code & 0xffff, regs->int_code >> 17);
-	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
+	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
 	printk(KERN_CONT "\n");
 	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
 	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
@@ -256,9 +256,9 @@ static noinline void do_no_context(struct pt_regs *regs)
 	const struct exception_table_entry *fixup;

 	/* Are we prepared to handle this kernel fault?  */
-	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+	fixup = search_exception_tables(regs->psw.addr);
 	if (fixup) {
-		regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
+		regs->psw.addr = extable_fixup(fixup);
 		return;
 	}


+ 1 - 1
arch/s390/mm/init.c

@@ -98,7 +98,7 @@ void __init paging_init(void)
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
-	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
+	__arch_local_irq_stosm(0x04);

 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
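The replacement avoids building a full PSW-mask value just to set one bit: STOSM ORs its operand into the 8-bit system-mask byte of the current PSW, and 0x04 there is the DAT bit, the same bit that 4UL << (BITS_PER_LONG - 8) addressed in the 64-bit mask word (bit positions assumed). Sketch:

/* Sketch: two spellings of "turn DAT on", interrupt bits untouched.
 * old: arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
 * new, via stosm on the system-mask byte:
 */
__arch_local_irq_stosm(0x04);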

+ 6 - 6
arch/s390/mm/mmap.c

@@ -169,12 +169,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,

 int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 {
-	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
+	if (is_compat_task() || TASK_SIZE >= TASK_MAX_SIZE)
 		return 0;
 	if (!(flags & MAP_FIXED))
 		addr = 0;
 	if ((addr + len) >= TASK_SIZE)
-		return crst_table_upgrade(current->mm, 1UL << 53);
+		return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
 	return 0;
 }

@@ -189,9 +189,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	if (!(area & ~PAGE_MASK))
 		return area;
-	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, 1UL << 53);
+		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -211,9 +211,9 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
 	if (!(area & ~PAGE_MASK))
 		return area;
-	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, 1UL << 53);
+		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
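TASK_MAX_SIZE names the 1UL << 53 ceiling that a 4-level page table can map on s390; the mmap paths above upgrade a task's table to four levels on demand, and the named constant keeps the call sites and the BUG_ON below in sync. Sketch of the assumed definition:

/* Sketch (assumed, arch/s390/include/asm/processor.h): highest
 * user-space limit reachable after a 4-level crst table upgrade.
 */
#define TASK_MAX_SIZE	(1UL << 53)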

+ 1 - 1
arch/s390/mm/pgtable.c

@@ -55,7 +55,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 	unsigned long entry;
 	int flush;

-	BUG_ON(limit > (1UL << 53));
+	BUG_ON(limit > TASK_MAX_SIZE);
 	flush = 0;
 repeat:
 	table = crst_table_alloc(mm);

+ 2 - 4
arch/s390/numa/numa.c

@@ -57,9 +57,7 @@ static __init pg_data_t *alloc_node_data(void)
 {
 	pg_data_t *res;

-	res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 1);
-	if (!res)
-		panic("Could not allocate memory for node data!\n");
+	res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
 	memset(res, 0, sizeof(pg_data_t));
 	return res;
 }
@@ -162,7 +160,7 @@ static int __init numa_init_late(void)
 		register_one_node(nid);
 	return 0;
 }
-device_initcall(numa_init_late);
+arch_initcall(numa_init_late);

 static int __init parse_debug(char *parm)
 {

+ 4 - 5
arch/s390/oprofile/backtrace.c

@@ -16,24 +16,23 @@ __show_trace(unsigned int *depth, unsigned long sp,
 	struct pt_regs *regs;

 	while (*depth) {
-		sp = sp & PSW_ADDR_INSN;
 		if (sp < low || sp > high - sizeof(*sf))
 			return sp;
 		sf = (struct stack_frame *) sp;
 		(*depth)--;
-		oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+		oprofile_add_trace(sf->gprs[8]);

 		/* Follow the backchain.  */
 		while (*depth) {
 			low = sp;
-			sp = sf->back_chain & PSW_ADDR_INSN;
+			sp = sf->back_chain;
 			if (!sp)
 				break;
 			if (sp <= low || sp > high - sizeof(*sf))
 				return sp;
 			sf = (struct stack_frame *) sp;
 			(*depth)--;
-			oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+			oprofile_add_trace(sf->gprs[8]);

 		}

@@ -46,7 +45,7 @@ __show_trace(unsigned int *depth, unsigned long sp,
 			return sp;
 		regs = (struct pt_regs *) sp;
 		(*depth)--;
-		oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+		oprofile_add_trace(sf->gprs[8]);
 		low = sp;
 		sp = regs->gprs[15];
 	}

+ 31 - 30
arch/s390/pci/pci.c

@@ -68,9 +68,12 @@ static struct airq_struct zpci_airq = {
 	.isc = PCI_ISC,
 };

-/* I/O Map */
+#define ZPCI_IOMAP_ENTRIES						\
+	min(((unsigned long) CONFIG_PCI_NR_FUNCTIONS * PCI_BAR_COUNT),	\
+	    ZPCI_IOMAP_MAX_ENTRIES)
+
 static DEFINE_SPINLOCK(zpci_iomap_lock);
-static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
+static unsigned long *zpci_iomap_bitmap;
 struct zpci_iomap_entry *zpci_iomap_start;
 EXPORT_SYMBOL_GPL(zpci_iomap_start);

@@ -265,27 +268,20 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
 			      unsigned long max)
 {
 	struct zpci_dev *zdev =	to_zpci(pdev);
-	u64 addr;
 	int idx;

-	if ((bar & 7) != bar)
+	if (!pci_resource_len(pdev, bar))
 		return NULL;

 	idx = zdev->bars[bar].map_idx;
 	spin_lock(&zpci_iomap_lock);
-	if (zpci_iomap_start[idx].count++) {
-		BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
-		       zpci_iomap_start[idx].bar != bar);
-	} else {
-		zpci_iomap_start[idx].fh = zdev->fh;
-		zpci_iomap_start[idx].bar = bar;
-	}
 	/* Detect overrun */
-	BUG_ON(!zpci_iomap_start[idx].count);
+	WARN_ON(!++zpci_iomap_start[idx].count);
+	zpci_iomap_start[idx].fh = zdev->fh;
+	zpci_iomap_start[idx].bar = bar;
 	spin_unlock(&zpci_iomap_lock);

-	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
-	return (void __iomem *) addr + offset;
+	return (void __iomem *) ZPCI_ADDR(idx) + offset;
 }
 EXPORT_SYMBOL(pci_iomap_range);

@@ -297,12 +293,11 @@ EXPORT_SYMBOL(pci_iomap);

 void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 {
-	unsigned int idx;
+	unsigned int idx = ZPCI_IDX(addr);

-	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
 	spin_lock(&zpci_iomap_lock);
 	/* Detect underrun */
-	BUG_ON(!zpci_iomap_start[idx].count);
+	WARN_ON(!zpci_iomap_start[idx].count);
 	if (!--zpci_iomap_start[idx].count) {
 		zpci_iomap_start[idx].fh = 0;
 		zpci_iomap_start[idx].bar = 0;
@@ -544,15 +539,15 @@ static void zpci_irq_exit(void)

 static int zpci_alloc_iomap(struct zpci_dev *zdev)
 {
-	int entry;
+	unsigned long entry;

 	spin_lock(&zpci_iomap_lock);
-	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
-	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
+	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
+	if (entry == ZPCI_IOMAP_ENTRIES) {
 		spin_unlock(&zpci_iomap_lock);
 		return -ENOSPC;
 	}
-	set_bit(entry, zpci_iomap);
+	set_bit(entry, zpci_iomap_bitmap);
 	spin_unlock(&zpci_iomap_lock);
 	return entry;
 }
@@ -561,7 +556,7 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
 {
 	spin_lock(&zpci_iomap_lock);
 	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
-	clear_bit(entry, zpci_iomap);
+	clear_bit(entry, zpci_iomap_bitmap);
 	spin_unlock(&zpci_iomap_lock);
 }

@@ -611,8 +606,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
 		if (zdev->bars[i].val & 4)
 			flags |= IORESOURCE_MEM_64;

-		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
-
+		addr = ZPCI_ADDR(entry);
 		size = 1UL << zdev->bars[i].size;

 		res = __alloc_res(zdev, addr, size, flags);
@@ -873,23 +867,30 @@ static int zpci_mem_init(void)
 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
 				16, 0, NULL);
 	if (!zdev_fmb_cache)
-		goto error_zdev;
+		goto error_fmb;

-	/* TODO: use realloc */
-	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
-				   GFP_KERNEL);
+	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
+				   sizeof(*zpci_iomap_start), GFP_KERNEL);
 	if (!zpci_iomap_start)
 		goto error_iomap;
-	return 0;

+	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
+				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
+	if (!zpci_iomap_bitmap)
+		goto error_iomap_bitmap;
+
+	return 0;
+error_iomap_bitmap:
+	kfree(zpci_iomap_start);
 error_iomap:
 	kmem_cache_destroy(zdev_fmb_cache);
-error_zdev:
+error_fmb:
 	return -ENOMEM;
 }

 static void zpci_mem_exit(void)
 {
+	kfree(zpci_iomap_bitmap);
 	kfree(zpci_iomap_start);
 	kmem_cache_destroy(zdev_fmb_cache);
 }
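ZPCI_ADDR() and ZPCI_IDX() simply package the shift-and-mask arithmetic deleted above: the iomap slot index is carried in bits 48 and up of the synthetic address handed out by pci_iomap(). A sketch reconstructing them from the removed expressions (header location assumed to be arch/s390/include/asm/pci_io.h):

/* Sketch, matching the open-coded arithmetic this patch deletes: */
#define ZPCI_ADDR(idx)	(ZPCI_IOMAP_ADDR_BASE | ((u64) (idx) << 48))
#define ZPCI_IDX(addr)	\
	(((__force u64) (addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48)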

+ 5 - 0
arch/s390/pci/pci_event.c

@@ -53,6 +53,11 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)

 	pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
 	       pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+
+	if (!pdev)
+		return;
+
+	pdev->error_state = pci_channel_io_perm_failure;
 }

 void zpci_event_error(void *data)

+ 0 - 1
arch/sh/include/asm/barrier.h

@@ -33,7 +33,6 @@
 #endif

 #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#define smp_store_mb(var, value) __smp_store_mb(var, value)

 #include <asm-generic/barrier.h>

+ 1 - 2
arch/x86/Kconfig

@@ -509,11 +509,10 @@ config X86_INTEL_CE

 config X86_INTEL_MID
 	bool "Intel MID platform support"
-	depends on X86_32
 	depends on X86_EXTENDED_PLATFORM
 	depends on X86_PLATFORM_DEVICES
 	depends on PCI
-	depends on PCI_GOANY
+	depends on X86_64 || (PCI_GOANY && X86_32)
 	depends on X86_IO_APIC
 	select SFI
 	select I2C

+ 3 - 2
arch/x86/include/asm/irq.h

@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);

 #define __ARCH_HAS_DO_SOFTIRQ

+struct irq_desc;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
 #endif

 #ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);

-struct irq_desc;
 extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);

 extern __visible unsigned int do_IRQ(struct pt_regs *regs);

+ 2 - 4
arch/x86/include/asm/pgtable_types.h

@@ -366,20 +366,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
 }
 static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
 {
+	pgprotval_t val = pgprot_val(pgprot);
 	pgprot_t new;
-	unsigned long val;

-	val = pgprot_val(pgprot);
 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
 		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
 	return new;
 }
 static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
 {
+	pgprotval_t val = pgprot_val(pgprot);
 	pgprot_t new;
-	unsigned long val;

-	val = pgprot_val(pgprot);
 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
 			  ((val & _PAGE_PAT_LARGE) >>
 			   (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
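Using pgprotval_t instead of unsigned long matters on 32-bit PAE kernels, where protection values are 64 bits wide and an unsigned long would truncate the high PAT/NX bits these helpers shuffle. Sketch of the assumed typedefs:

/* Sketch (assumed, arch/x86/include/asm/pgtable-*level_types.h): */
#ifdef CONFIG_X86_PAE
typedef u64 pgprotval_t;		/* 64-bit even on i386 */
#else
typedef unsigned long pgprotval_t;
#endif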

+ 5 - 1
arch/x86/kernel/apic/io_apic.c

@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
 {
 	int pin, ioapic, irq, irq_entry;
 	const struct cpumask *mask;
+	struct irq_desc *desc;
 	struct irq_data *idata;
 	struct irq_chip *chip;

@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
 		if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
 			continue;

-		idata = irq_get_irq_data(irq);
+		desc = irq_to_desc(irq);
+		raw_spin_lock_irq(&desc->lock);
+		idata = irq_desc_get_irq_data(desc);

 		/*
 		 * Honour affinities which have been set in early boot
@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
 		/* Might be lapic_chip for irq 0 */
 		if (chip->irq_set_affinity)
 			chip->irq_set_affinity(idata, mask, false);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 }
 #endif

+ 145 - 76
arch/x86/kernel/apic/vector.c

@@ -31,7 +31,7 @@ struct apic_chip_data {
 struct irq_domain *x86_vector_domain;
 EXPORT_SYMBOL_GPL(x86_vector_domain);
 static DEFINE_RAW_SPINLOCK(vector_lock);
-static cpumask_var_t vector_cpumask;
+static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef	CONFIG_X86_IO_APIC
 static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@@ -118,35 +118,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 	 */
 	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
 	static int current_offset = VECTOR_OFFSET_START % 16;
-	int cpu, err;
+	int cpu, vector;

-	if (d->move_in_progress)
+	/*
+	 * If there is still a move in progress or the previous move has not
+	 * been cleaned up completely, tell the caller to come back later.
+	 */
+	if (d->move_in_progress ||
+	    cpumask_intersects(d->old_domain, cpu_online_mask))
 		return -EBUSY;

 	/* Only try and allocate irqs on cpus that are present */
-	err = -ENOSPC;
 	cpumask_clear(d->old_domain);
+	cpumask_clear(searched_cpumask);
 	cpu = cpumask_first_and(mask, cpu_online_mask);
 	while (cpu < nr_cpu_ids) {
-		int new_cpu, vector, offset;
+		int new_cpu, offset;

+		/* Get the possible target cpus for @mask/@cpu from the apic */
 		apic->vector_allocation_domain(cpu, vector_cpumask, mask);

+		/*
+		 * Clear the offline cpus from @vector_cpumask for searching
+		 * and verify whether the result overlaps with @mask. If true,
+		 * then the call to apic->cpu_mask_to_apicid_and() will
+		 * succeed as well. If not, no point in trying to find a
+		 * vector in this mask.
+		 */
+		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
+		if (!cpumask_intersects(vector_searchmask, mask))
+			goto next_cpu;
+
 		if (cpumask_subset(vector_cpumask, d->domain)) {
-			err = 0;
 			if (cpumask_equal(vector_cpumask, d->domain))
-				break;
+				goto success;
 			/*
-			 * New cpumask using the vector is a proper subset of
-			 * the current in use mask. So cleanup the vector
-			 * allocation for the members that are not used anymore.
+			 * Mark the cpus which are not longer in the mask for
+			 * cleanup.
 			 */
-			cpumask_andnot(d->old_domain, d->domain,
-				       vector_cpumask);
-			d->move_in_progress =
-			   cpumask_intersects(d->old_domain, cpu_online_mask);
-			cpumask_and(d->domain, d->domain, vector_cpumask);
-			break;
+			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
+			vector = d->cfg.vector;
+			goto update;
 		}

 		vector = current_vector;
@@ -158,45 +170,60 @@ next:
 			vector = FIRST_EXTERNAL_VECTOR + offset;
 		}

-		if (unlikely(current_vector == vector)) {
-			cpumask_or(d->old_domain, d->old_domain,
-				   vector_cpumask);
-			cpumask_andnot(vector_cpumask, mask, d->old_domain);
-			cpu = cpumask_first_and(vector_cpumask,
-						cpu_online_mask);
-			continue;
-		}
+		/* If the search wrapped around, try the next cpu */
+		if (unlikely(current_vector == vector))
+			goto next_cpu;

 		if (test_bit(vector, used_vectors))
 			goto next;

-		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
+		for_each_cpu(new_cpu, vector_searchmask) {
 			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
 				goto next;
 		}
 		/* Found one! */
 		current_vector = vector;
 		current_offset = offset;
-		if (d->cfg.vector) {
+		/* Schedule the old vector for cleanup on all cpus */
+		if (d->cfg.vector)
 			cpumask_copy(d->old_domain, d->domain);
-			d->move_in_progress =
-			   cpumask_intersects(d->old_domain, cpu_online_mask);
-		}
-		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
+		for_each_cpu(new_cpu, vector_searchmask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
-		d->cfg.vector = vector;
-		cpumask_copy(d->domain, vector_cpumask);
-		err = 0;
-		break;
-	}
+		goto update;

-	if (!err) {
-		/* cache destination APIC IDs into cfg->dest_apicid */
-		err = apic->cpu_mask_to_apicid_and(mask, d->domain,
-						   &d->cfg.dest_apicid);
+next_cpu:
+		/*
+		 * We exclude the current @vector_cpumask from the requested
+		 * @mask and try again with the next online cpu in the
+		 * result. We cannot modify @mask, so we use @vector_cpumask
+		 * as a temporary buffer here as it will be reassigned when
+		 * calling apic->vector_allocation_domain() above.
+		 */
+		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
+		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
+		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
+		continue;
 	}
+	return -ENOSPC;

-	return err;
+update:
+	/*
+	 * Exclude offline cpus from the cleanup mask and set the
+	 * move_in_progress flag when the result is not empty.
+	 */
+	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
+	d->move_in_progress = !cpumask_empty(d->old_domain);
+	d->cfg.vector = vector;
+	cpumask_copy(d->domain, vector_cpumask);
+success:
+	/*
+	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
+	 * as we already established, that mask & d->domain & cpu_online_mask
+	 * is not empty.
+	 */
+	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+					    &d->cfg.dest_apicid));
+	return 0;
 }

 static int assign_irq_vector(int irq, struct apic_chip_data *data,
@@ -226,10 +253,8 @@ static int assign_irq_vector_policy(int irq, int node,
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
 	struct irq_desc *desc;
-	unsigned long flags;
 	int cpu, vector;

-	raw_spin_lock_irqsave(&vector_lock, flags);
 	BUG_ON(!data->cfg.vector);

 	vector = data->cfg.vector;
@@ -239,10 +264,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 	data->cfg.vector = 0;
 	cpumask_clear(data->domain);

-	if (likely(!data->move_in_progress)) {
-		raw_spin_unlock_irqrestore(&vector_lock, flags);
+	/*
+	 * If move is in progress or the old_domain mask is not empty,
+	 * i.e. the cleanup IPI has not been processed yet, we need to remove
+	 * the old references to desc from all cpus vector tables.
+	 */
+	if (!data->move_in_progress && cpumask_empty(data->old_domain))
 		return;
-	}

 	desc = irq_to_desc(irq);
 	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
@@ -255,7 +283,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 		}
 	}
 	data->move_in_progress = 0;
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }

 void init_irq_alloc_info(struct irq_alloc_info *info,
@@ -276,19 +303,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
 static void x86_vector_free_irqs(struct irq_domain *domain,
 				 unsigned int virq, unsigned int nr_irqs)
 {
+	struct apic_chip_data *apic_data;
 	struct irq_data *irq_data;
+	unsigned long flags;
 	int i;

 	for (i = 0; i < nr_irqs; i++) {
 		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
 		if (irq_data && irq_data->chip_data) {
+			raw_spin_lock_irqsave(&vector_lock, flags);
 			clear_irq_vector(virq + i, irq_data->chip_data);
-			free_apic_chip_data(irq_data->chip_data);
+			apic_data = irq_data->chip_data;
+			irq_domain_reset_irq_data(irq_data);
+			raw_spin_unlock_irqrestore(&vector_lock, flags);
+			free_apic_chip_data(apic_data);
 #ifdef	CONFIG_X86_IO_APIC
 			if (virq + i < nr_legacy_irqs())
 				legacy_irq_data[virq + i] = NULL;
 #endif
-			irq_domain_reset_irq_data(irq_data);
 		}
 	}
 }
@@ -406,6 +438,8 @@ int __init arch_early_irq_init(void)
 	arch_init_htirq_domain(x86_vector_domain);
 	arch_init_htirq_domain(x86_vector_domain);
 
 
 	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
 	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
+	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
 
 
 	return arch_early_ioapic_init();
 	return arch_early_ioapic_init();
 }
 }
@@ -494,14 +528,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
 		return -EINVAL;
 		return -EINVAL;
 
 
 	err = assign_irq_vector(irq, data, dest);
 	err = assign_irq_vector(irq, data, dest);
-	if (err) {
-		if (assign_irq_vector(irq, data,
-				      irq_data_get_affinity_mask(irq_data)))
-			pr_err("Failed to recover vector for irq %d\n", irq);
-		return err;
-	}
-
-	return IRQ_SET_MASK_OK;
+	return err ? err : IRQ_SET_MASK_OK;
 }
 }
 
 
 static struct irq_chip lapic_controller = {
 static struct irq_chip lapic_controller = {
@@ -513,20 +540,12 @@ static struct irq_chip lapic_controller = {
 #ifdef CONFIG_SMP
 #ifdef CONFIG_SMP
 static void __send_cleanup_vector(struct apic_chip_data *data)
 static void __send_cleanup_vector(struct apic_chip_data *data)
 {
 {
-	cpumask_var_t cleanup_mask;
-
-	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-		unsigned int i;
-
-		for_each_cpu_and(i, data->old_domain, cpu_online_mask)
-			apic->send_IPI_mask(cpumask_of(i),
-					    IRQ_MOVE_CLEANUP_VECTOR);
-	} else {
-		cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
-		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		free_cpumask_var(cleanup_mask);
-	}
+	raw_spin_lock(&vector_lock);
+	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
 	data->move_in_progress = 0;
 	data->move_in_progress = 0;
+	if (!cpumask_empty(data->old_domain))
+		apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
+	raw_spin_unlock(&vector_lock);
 }
 }
 
 
 void send_cleanup_vector(struct irq_cfg *cfg)
 void send_cleanup_vector(struct irq_cfg *cfg)
@@ -570,12 +589,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 			goto unlock;
 			goto unlock;
 
 
 		/*
 		/*
-		 * Check if the irq migration is in progress. If so, we
-		 * haven't received the cleanup request yet for this irq.
+		 * Nothing to cleanup if irq migration is in progress
+		 * or this cpu is not set in the cleanup mask.
 		 */
 		 */
-		if (data->move_in_progress)
+		if (data->move_in_progress ||
+		    !cpumask_test_cpu(me, data->old_domain))
 			goto unlock;
 			goto unlock;
 
 
+		/*
+		 * We have two cases to handle here:
+		 * 1) vector is unchanged but the target mask got reduced
+		 * 2) vector and the target mask has changed
+		 *
+		 * #1 is obvious, but in #2 we have two vectors with the same
+		 * irq descriptor: the old and the new vector. So we need to
+		 * make sure that we only cleanup the old vector. The new
+		 * vector has the current @vector number in the config and
+		 * this cpu is part of the target mask. We better leave that
+		 * one alone.
+		 */
 		if (vector == data->cfg.vector &&
 		if (vector == data->cfg.vector &&
 		    cpumask_test_cpu(me, data->domain))
 		    cpumask_test_cpu(me, data->domain))
 			goto unlock;
 			goto unlock;
@@ -593,6 +625,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 			goto unlock;
 			goto unlock;
 		}
 		}
 		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+		cpumask_clear_cpu(me, data->old_domain);
 unlock:
 unlock:
 		raw_spin_unlock(&desc->lock);
 		raw_spin_unlock(&desc->lock);
 	}
 	}
@@ -621,12 +654,48 @@ void irq_complete_move(struct irq_cfg *cfg)
 	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
 	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
 }
 }
 
 
-void irq_force_complete_move(int irq)
+/*
+ * Called with @desc->lock held and interrupts disabled.
+ */
+void irq_force_complete_move(struct irq_desc *desc)
 {
 {
-	struct irq_cfg *cfg = irq_cfg(irq);
+	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+	struct apic_chip_data *data = apic_chip_data(irqdata);
+	struct irq_cfg *cfg = data ? &data->cfg : NULL;
 
 
-	if (cfg)
-		__irq_complete_move(cfg, cfg->vector);
+	if (!cfg)
+		return;
+
+	__irq_complete_move(cfg, cfg->vector);
+
+	/*
+	 * This is tricky. If the cleanup of @data->old_domain has not been
+	 * done yet, then the following setaffinity call will fail with
+	 * -EBUSY. This can leave the interrupt in a stale state.
+	 *
+	 * The cleanup cannot make progress because we hold @desc->lock. So in
+	 * case @data->old_domain is not yet cleaned up, we need to drop the
+	 * lock and acquire it again. @desc cannot go away, because the
+	 * hotplug code holds the sparse irq lock.
+	 */
+	raw_spin_lock(&vector_lock);
+	/* Clean out all offline cpus (including ourself) first. */
+	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+	while (!cpumask_empty(data->old_domain)) {
+		raw_spin_unlock(&vector_lock);
+		raw_spin_unlock(&desc->lock);
+		cpu_relax();
+		raw_spin_lock(&desc->lock);
+		/*
+		 * Reevaluate apic_chip_data. It might have been cleared after
+		 * we dropped @desc->lock.
+		 */
+		data = apic_chip_data(irqdata);
+		if (!data)
+			return;
+		raw_spin_lock(&vector_lock);
+	}
+	raw_spin_unlock(&vector_lock);
 }
 }
 #endif
 #endif
 
 

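irq_force_complete_move() above uses a classic drop-and-retry pattern: it cannot wait for the cleanup IPI while holding desc->lock (the cleanup side needs that lock), so it releases both locks, relaxes, reacquires, and revalidates the chip data before checking again. Here is a rough pthread model of that pattern, not kernel code; struct state and wait_for_cleanup() are invented names, and sched_yield() stands in for cpu_relax().

/* Compile with -pthread. A sketch of "release, spin, reacquire, revalidate". */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct state {
	pthread_mutex_t desc_lock;	/* models desc->lock */
	pthread_mutex_t vector_lock;	/* models vector_lock */
	unsigned long old_domain;	/* bitmask of pending cleanups */
	bool alive;			/* models apic_chip_data != NULL */
};

/* Called with s->desc_lock held; returns with it held, false if torn down. */
static bool wait_for_cleanup(struct state *s)
{
	pthread_mutex_lock(&s->vector_lock);
	while (s->old_domain) {
		/* Drop both locks so the cleanup side can make progress. */
		pthread_mutex_unlock(&s->vector_lock);
		pthread_mutex_unlock(&s->desc_lock);
		sched_yield();
		pthread_mutex_lock(&s->desc_lock);
		/* Revalidate: the object may have died while we slept. */
		if (!s->alive)
			return false;
		pthread_mutex_lock(&s->vector_lock);
	}
	pthread_mutex_unlock(&s->vector_lock);
	return true;
}

int main(void)
{
	struct state s = {
		.desc_lock = PTHREAD_MUTEX_INITIALIZER,
		.vector_lock = PTHREAD_MUTEX_INITIALIZER,
		.old_domain = 0,	/* nothing pending: returns at once */
		.alive = true,
	};

	pthread_mutex_lock(&s.desc_lock);
	wait_for_cleanup(&s);
	pthread_mutex_unlock(&s.desc_lock);
	return 0;
}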
+ 4 - 1
arch/x86/kernel/apic/x2apic_uv_x.c

@@ -889,7 +889,10 @@ void __init uv_system_init(void)
 		return;
 	}
 	pr_info("UV: Found %s hub\n", hub);
-	map_low_mmrs();
+
+	/* We now only need to map the MMRs on UV1 */
+	if (is_uv1_hub())
+		map_low_mmrs();
 
 	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
 	m_val = m_n_config.s.m_skt;

+ 3 - 4
arch/x86/kernel/cpu/perf_event_intel.c

@@ -1960,7 +1960,8 @@ intel_bts_constraints(struct perf_event *event)
 
 static int intel_alt_er(int idx, u64 config)
 {
-	int alt_idx;
+	int alt_idx = idx;
+
 	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
 		return idx;
 
@@ -2897,14 +2898,12 @@ static void intel_pmu_cpu_starting(int cpu)
 		return;
 
 	if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
-		void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
-
 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
 			struct intel_shared_regs *pc;
 
 			pc = per_cpu(cpu_hw_events, i).shared_regs;
 			if (pc && pc->core_id == core_id) {
-				*onln = cpuc->shared_regs;
+				cpuc->kfree_on_online[0] = cpuc->shared_regs;
 				cpuc->shared_regs = pc;
 				break;
 			}

+ 3 - 0
arch/x86/kernel/cpu/perf_event_intel_uncore.c

@@ -995,6 +995,9 @@ static int __init uncore_pci_init(void)
 	case 87: /* Knights Landing */
 		ret = knl_uncore_pci_init();
 		break;
+	case 94: /* SkyLake */
+		ret = skl_uncore_pci_init();
+		break;
 	default:
 		return 0;
 	}

+ 1 - 0
arch/x86/kernel/cpu/perf_event_intel_uncore.h

@@ -336,6 +336,7 @@ int snb_uncore_pci_init(void);
 int ivb_uncore_pci_init(void);
 int hsw_uncore_pci_init(void);
 int bdw_uncore_pci_init(void);
+int skl_uncore_pci_init(void);
 void snb_uncore_cpu_init(void);
 void nhm_uncore_cpu_init(void);
 int snb_pci2phy_map_init(int devid);

+ 20 - 0
arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c

@@ -8,6 +8,7 @@
 #define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
 #define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
+#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
@@ -524,6 +525,14 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
 	{ /* end: all zeroes */ },
 };
 
+static const struct pci_device_id skl_uncore_pci_ids[] = {
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* end: all zeroes */ },
+};
+
 static struct pci_driver snb_uncore_pci_driver = {
 	.name		= "snb_uncore",
 	.id_table	= snb_uncore_pci_ids,
@@ -544,6 +553,11 @@ static struct pci_driver bdw_uncore_pci_driver = {
 	.id_table	= bdw_uncore_pci_ids,
 };
 
+static struct pci_driver skl_uncore_pci_driver = {
+	.name		= "skl_uncore",
+	.id_table	= skl_uncore_pci_ids,
+};
+
 struct imc_uncore_pci_dev {
 	__u32 pci_id;
 	struct pci_driver *driver;
@@ -558,6 +572,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
 	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
 	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
 	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
+	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
 	{  /* end marker */ }
 };
 
@@ -610,6 +625,11 @@ int bdw_uncore_pci_init(void)
 	return imc_uncore_pci_init();
 }
 
+int skl_uncore_pci_init(void)
+{
+	return imc_uncore_pci_init();
+}
+
 /* end of Sandy Bridge uncore support */
 
 /* Nehalem uncore support */

+ 8 - 0
arch/x86/kernel/head64.c

@@ -192,5 +192,13 @@ void __init x86_64_start_reservations(char *real_mode_data)
 
 	reserve_ebda_region();
 
+	switch (boot_params.hdr.hardware_subarch) {
+	case X86_SUBARCH_INTEL_MID:
+		x86_intel_mid_early_setup();
+		break;
+	default:
+		break;
+	}
+
 	start_kernel();
 }

+ 10 - 1
arch/x86/kernel/irq.c

@@ -462,7 +462,7 @@ void fixup_irqs(void)
 		 * non intr-remapping case, we can't wait till this interrupt
 		 * arrives at this cpu before completing the irq move.
 		 */
-		irq_force_complete_move(irq);
+		irq_force_complete_move(desc);
 
 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			break_affinity = 1;
@@ -470,6 +470,15 @@ void fixup_irqs(void)
 		}
 
 		chip = irq_data_get_irq_chip(data);
+		/*
+		 * The interrupt descriptor might have been cleaned up
+		 * already, but it is not yet removed from the radix tree
+		 */
+		if (!chip) {
+			raw_spin_unlock(&desc->lock);
+			continue;
+		}
+
 		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
 			chip->irq_mask(data);
 

+ 2 - 2
arch/x86/mm/pageattr.c

@@ -33,7 +33,7 @@ struct cpa_data {
 	pgd_t		*pgd;
 	pgprot_t	mask_set;
 	pgprot_t	mask_clr;
-	int		numpages;
+	unsigned long	numpages;
 	int		flags;
 	unsigned long	pfn;
 	unsigned	force_split : 1;
@@ -1350,7 +1350,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 		 * CPA operation. Either a large page has been
 		 * preserved or a single page update happened.
 		 */
-		BUG_ON(cpa->numpages > numpages);
+		BUG_ON(cpa->numpages > numpages || !cpa->numpages);
 		numpages -= cpa->numpages;
 		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
 			cpa->curpage++;

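Two things make the pageattr change above safe: numpages becomes unsigned long so very large ranges no longer overflow an int, and the added !cpa->numpages check turns a stalled iteration (one that processes zero pages) into a loud failure instead of an endless loop. A toy model of that progress invariant, with process_chunk() standing in for one CPA pass and the page counts made up:

#include <assert.h>
#include <stdio.h>

/* Pretend each pass handles up to 512 pages (one large page). */
static unsigned long process_chunk(unsigned long remaining)
{
	return remaining < 512 ? remaining : 512;
}

int main(void)
{
	unsigned long numpages = 1500;

	while (numpages) {
		unsigned long done = process_chunk(numpages);

		/* Mirrors BUG_ON(cpa->numpages > numpages || !cpa->numpages) */
		assert(done <= numpages && done != 0);
		numpages -= done;
	}
	printf("all pages processed\n");
	return 0;
}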
+ 13 - 4
arch/x86/platform/efi/quirks.c

@@ -8,6 +8,7 @@
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <asm/efi.h>
 #include <asm/uv/uv.h>
 
@@ -248,6 +249,16 @@ out:
 	return ret;
 }
 
+static const struct dmi_system_id sgi_uv1_dmi[] = {
+	{ NULL, "SGI UV1",
+		{	DMI_MATCH(DMI_PRODUCT_NAME,	"Stoutland Platform"),
+			DMI_MATCH(DMI_PRODUCT_VERSION,	"1.0"),
+			DMI_MATCH(DMI_BIOS_VENDOR,	"SGI.COM"),
+		}
+	},
+	{ } /* NULL entry stops DMI scanning */
+};
+
 void __init efi_apply_memmap_quirks(void)
 {
 	/*
@@ -260,10 +271,8 @@ void __init efi_apply_memmap_quirks(void)
 		efi_unmap_memmap();
 	}
 
-	/*
-	 * UV doesn't support the new EFI pagetable mapping yet.
-	 */
-	if (is_uv_system())
+	/* UV2+ BIOS has a fix for this issue.  UV1 still needs the quirk. */
+	if (dmi_check_system(sgi_uv1_dmi))
 		set_bit(EFI_OLD_MEMMAP, &efi.flags);
 }
 

+ 3 - 5
arch/x86/platform/intel-mid/intel-mid.c

@@ -138,7 +138,7 @@ static void intel_mid_arch_setup(void)
 		intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
 	else {
 		intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-		pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
+		pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
 	}
 
 out:
@@ -214,12 +214,10 @@ static inline int __init setup_x86_intel_mid_timer(char *arg)
 	else if (strcmp("lapic_and_apbt", arg) == 0)
 		intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
 	else {
-		pr_warn("X86 INTEL_MID timer option %s not recognised"
-			   " use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
-			   arg);
+		pr_warn("X86 INTEL_MID timer option %s not recognised use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+			arg);
 		return -EINVAL;
 	}
 	return 0;
 }
 __setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
-

+ 13 - 5
arch/x86/platform/intel-quark/imr.c

@@ -220,11 +220,12 @@ static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
 		if (imr_is_enabled(&imr)) {
 			base = imr_to_phys(imr.addr_lo);
 			end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+			size = end - base + 1;
 		} else {
 			base = 0;
 			end = 0;
+			size = 0;
 		}
-		size = end - base;
 		seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
 			   "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
 			   &base, &end, size, imr.rmask, imr.wmask,
@@ -579,6 +580,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 {
 	phys_addr_t base = virt_to_phys(&_text);
 	size_t size = virt_to_phys(&__end_rodata) - base;
+	unsigned long start, end;
 	int i;
 	int ret;
 
@@ -586,18 +588,24 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 	for (i = 0; i < idev->max_imr; i++)
 		imr_clear(i);
 
+	start = (unsigned long)_text;
+	end = (unsigned long)__end_rodata - 1;
+
 	/*
 	 * Setup a locked IMR around the physical extent of the kernel
 	 * from the beginning of the .text section to the end of the
 	 * .rodata section as one physically contiguous block.
+	 *
+	 * We don't round up @size since it is already PAGE_SIZE aligned.
+	 * See vmlinux.lds.S for details.
 	 */
 	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
 	if (ret < 0) {
-		pr_err("unable to setup IMR for kernel: (%p - %p)\n",
-			&_text, &__end_rodata);
+		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
+			size / 1024, start, end);
	} else {
-		pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
-			size / 1024, &_text, &__end_rodata);
+		pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
+			size / 1024, start, end);
 	}
 
 }
+ 19 - 7
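The imr.c debugfs fix above hinges on inclusive range arithmetic: for an enabled IMR, end names the last covered byte, so the size is end - base + 1, and a disabled IMR must report size 0 rather than end - base. A small standalone check of that arithmetic; IMR_ALIGN/IMR_MASK are copied in simplified form and the addresses are made up:

#include <stddef.h>
#include <stdio.h>

#define IMR_ALIGN	0x400UL			/* 1 KiB granularity, as in imr.c */
#define IMR_MASK	(IMR_ALIGN - 1)

int main(void)
{
	unsigned long base = 0x1000, addr_hi = 0x1c00;
	unsigned long end = addr_hi + IMR_MASK;	/* last byte the IMR covers */
	size_t size = end - base + 1;		/* inclusive range */

	printf("base=%#lx end=%#lx size=%#zx\n", base, end, size);
	return 0;
}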
block/blk-merge.c

@@ -70,6 +70,18 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
 	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
 	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
 }
 }
 
 
+static inline unsigned get_max_io_size(struct request_queue *q,
+				       struct bio *bio)
+{
+	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+	unsigned mask = queue_logical_block_size(q) - 1;
+
+	/* aligned to logical block size */
+	sectors &= ~(mask >> 9);
+
+	return sectors;
+}
+
 static struct bio *blk_bio_segment_split(struct request_queue *q,
 static struct bio *blk_bio_segment_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio *bio,
 					 struct bio_set *bs,
 					 struct bio_set *bs,
@@ -81,6 +93,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	unsigned front_seg_size = bio->bi_seg_front_size;
 	unsigned front_seg_size = bio->bi_seg_front_size;
 	bool do_split = true;
 	bool do_split = true;
 	struct bio *new = NULL;
 	struct bio *new = NULL;
+	const unsigned max_sectors = get_max_io_size(q, bio);
 
 
 	bio_for_each_segment(bv, bio, iter) {
 	bio_for_each_segment(bv, bio, iter) {
 		/*
 		/*
@@ -90,20 +103,19 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
 		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
 			goto split;
 			goto split;
 
 
-		if (sectors + (bv.bv_len >> 9) >
-				blk_max_size_offset(q, bio->bi_iter.bi_sector)) {
+		if (sectors + (bv.bv_len >> 9) > max_sectors) {
 			/*
 			/*
 			 * Consider this a new segment if we're splitting in
 			 * Consider this a new segment if we're splitting in
 			 * the middle of this vector.
 			 * the middle of this vector.
 			 */
 			 */
 			if (nsegs < queue_max_segments(q) &&
 			if (nsegs < queue_max_segments(q) &&
-			    sectors < blk_max_size_offset(q,
-						bio->bi_iter.bi_sector)) {
+			    sectors < max_sectors) {
 				nsegs++;
 				nsegs++;
-				sectors = blk_max_size_offset(q,
-						bio->bi_iter.bi_sector);
+				sectors = max_sectors;
 			}
 			}
-			goto split;
+			if (sectors)
+				goto split;
+			/* Make this single bvec as the 1st segment */
 		}
 		}
 
 
 		if (bvprvp && blk_queue_cluster(q)) {
 		if (bvprvp && blk_queue_cluster(q)) {

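The new get_max_io_size() helper rounds the per-offset queue limit down to a logical-block boundary: with 512-byte sectors, mask >> 9 is the number of sectors per logical block minus one, so clearing those low bits keeps any split a multiple of the logical block size. A hedged userspace rendition of just that arithmetic, with made-up limit values:

#include <stdio.h>

static unsigned get_max_io_size(unsigned max_sectors,
				unsigned logical_block_size)
{
	unsigned mask = logical_block_size - 1;

	/* 512-byte sectors per logical block; clear the low bits. */
	max_sectors &= ~(mask >> 9);
	return max_sectors;
}

int main(void)
{
	/* e.g. a limit of 1027 sectors on a 4096-byte logical block */
	printf("%u\n", get_max_io_size(1027, 4096));	/* prints 1024 */
	return 0;
}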
+ 0 - 8
drivers/acpi/video_detect.c

@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
 		},
 	},
-	{
-	.callback = video_detect_force_vendor,
-	.ident = "Dell Inspiron 5737",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
-		},
-	},
 
 	/*
 	 * These models have a working acpi_video backlight control, and using

+ 2 - 0
drivers/base/platform-msi.c

@@ -284,6 +284,7 @@ out_free_priv_data:
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
 
 /**
  * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
@@ -301,6 +302,7 @@ void platform_msi_domain_free_irqs(struct device *dev)
 	msi_domain_free_irqs(dev->msi_domain, dev);
 	platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
 }
+EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
 
 /**
  * platform_msi_get_host_data - Query the private data associated with

+ 9 - 4
drivers/base/platform.c

@@ -558,10 +558,15 @@ static int platform_drv_probe(struct device *_dev)
 		return ret;
 
 	ret = dev_pm_domain_attach(_dev, true);
-	if (ret != -EPROBE_DEFER && drv->probe) {
-		ret = drv->probe(dev);
-		if (ret)
-			dev_pm_domain_detach(_dev, true);
+	if (ret != -EPROBE_DEFER) {
+		if (drv->probe) {
+			ret = drv->probe(dev);
+			if (ret)
+				dev_pm_domain_detach(_dev, true);
+		} else {
+			/* don't fail if just dev_pm_domain_attach failed */
+			ret = 0;
+		}
 	}
 
 	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {

+ 20 - 17
drivers/base/power/domain.c

@@ -162,7 +162,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 
 /**
  * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
- * @genpd: PM domait to power off.
+ * @genpd: PM domain to power off.
  *
 * Queue up the execution of genpd_poweroff() unless it's already been done
 * before.
@@ -172,16 +172,15 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 	queue_work(pm_wq, &genpd->power_off_work);
 }
 
-static int genpd_poweron(struct generic_pm_domain *genpd);
-
 /**
  * __genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
  *
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-static int __genpd_poweron(struct generic_pm_domain *genpd)
+static int __genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 {
 	struct gpd_link *link;
 	int ret = 0;
@@ -196,11 +195,16 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
 	 * with it.
 	 */
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
-		genpd_sd_counter_inc(link->master);
+		struct generic_pm_domain *master = link->master;
+
+		genpd_sd_counter_inc(master);
+
+		mutex_lock_nested(&master->lock, depth + 1);
+		ret = __genpd_poweron(master, depth + 1);
+		mutex_unlock(&master->lock);
 
-		ret = genpd_poweron(link->master);
 		if (ret) {
-			genpd_sd_counter_dec(link->master);
+			genpd_sd_counter_dec(master);
 			goto err;
 		}
 	}
@@ -232,11 +236,12 @@ static int genpd_poweron(struct generic_pm_domain *genpd)
 	int ret;
 
 	mutex_lock(&genpd->lock);
-	ret = __genpd_poweron(genpd);
+	ret = __genpd_poweron(genpd, 0);
 	mutex_unlock(&genpd->lock);
 	return ret;
 }
 
+
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
 	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
@@ -484,7 +489,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	}
 
 	mutex_lock(&genpd->lock);
-	ret = __genpd_poweron(genpd);
+	ret = __genpd_poweron(genpd, 0);
 	mutex_unlock(&genpd->lock);
 
 	if (ret)
@@ -1339,8 +1344,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 	if (!link)
 		return -ENOMEM;
 
-	mutex_lock(&genpd->lock);
-	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock(&subdomain->lock);
+	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
 	if (genpd->status == GPD_STATE_POWER_OFF
 	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1363,8 +1368,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 		genpd_sd_counter_inc(genpd);
 
  out:
-	mutex_unlock(&subdomain->lock);
 	mutex_unlock(&genpd->lock);
+	mutex_unlock(&subdomain->lock);
 	if (ret)
 		kfree(link);
 	return ret;
@@ -1385,7 +1390,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
-	mutex_lock(&genpd->lock);
+	mutex_lock(&subdomain->lock);
+	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
 	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1398,22 +1404,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 		if (link->slave != subdomain)
 			continue;
 
-		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
-
 		list_del(&link->master_node);
 		list_del(&link->slave_node);
 		kfree(link);
 		if (subdomain->status != GPD_STATE_POWER_OFF)
 			genpd_sd_counter_dec(genpd);
 
-		mutex_unlock(&subdomain->lock);
-
 		ret = 0;
 		break;
 	}
 
 out:
 	mutex_unlock(&genpd->lock);
+	mutex_unlock(&subdomain->lock);
 
 	return ret;
 }

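The genpd changes above are mostly about lock ordering: pm_genpd_add_subdomain() and pm_genpd_remove_subdomain() now both take the subdomain lock first and the parent lock nested, so the two paths can no longer deadlock against each other, and __genpd_poweron() threads a depth counter through mutex_lock_nested() for the master chain. A compact pthread illustration of the fixed ordering, with all names invented for the example:

#include <pthread.h>
#include <stdio.h>

struct domain {
	pthread_mutex_t lock;
	int subdomain_count;
};

static void add_subdomain(struct domain *parent, struct domain *sub)
{
	pthread_mutex_lock(&sub->lock);		/* always subdomain first */
	pthread_mutex_lock(&parent->lock);
	parent->subdomain_count++;
	pthread_mutex_unlock(&parent->lock);
	pthread_mutex_unlock(&sub->lock);
}

static void remove_subdomain(struct domain *parent, struct domain *sub)
{
	pthread_mutex_lock(&sub->lock);		/* same order as add_subdomain() */
	pthread_mutex_lock(&parent->lock);
	parent->subdomain_count--;
	pthread_mutex_unlock(&parent->lock);
	pthread_mutex_unlock(&sub->lock);
}

int main(void)
{
	struct domain parent = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct domain sub = { PTHREAD_MUTEX_INITIALIZER, 0 };

	add_subdomain(&parent, &sub);
	remove_subdomain(&parent, &sub);
	printf("count=%d\n", parent.subdomain_count);
	return 0;
}

Because every path acquires the pair in the same order, no thread can hold one lock while waiting for the other in the reverse order, which is exactly the ABBA scenario the old code permitted.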
+ 12 - 0
drivers/clocksource/Kconfig

@@ -30,6 +30,8 @@ config CLKSRC_MMIO
 config DIGICOLOR_TIMER
 	bool "Digicolor timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	depends on HAS_IOMEM
 	help
 	  Enables the support for the digicolor timer driver.
 
@@ -55,6 +57,7 @@ config ARMADA_370_XP_TIMER
 	bool "Armada 370 and XP timer driver" if COMPILE_TEST
 	depends on ARM
 	select CLKSRC_OF
+	select CLKSRC_MMIO
 	help
 	  Enables the support for the Armada 370 and XP timer driver.
 
@@ -76,6 +79,7 @@ config ORION_TIMER
 config SUN4I_TIMER
 	bool "Sun4i timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	select CLKSRC_MMIO
 	help
 	  Enables support for the Sun4i timer.
@@ -89,6 +93,7 @@ config SUN5I_HSTIMER
 
 config TEGRA_TIMER
 	bool "Tegra timer driver" if COMPILE_TEST
+	select CLKSRC_MMIO
 	depends on ARM
 	help
 	  Enables support for the Tegra driver.
@@ -96,6 +101,7 @@ config TEGRA_TIMER
 config VT8500_TIMER
 	bool "VT8500 timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	help
 	  Enables support for the VT8500 driver.
 
@@ -131,6 +137,7 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
 config CLKSRC_DBX500_PRCMU
 	bool "Clocksource PRCMU Timer" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	help
 	  Use the always on PRCMU Timer as clocksource
 
@@ -248,6 +255,7 @@ config CLKSRC_EXYNOS_MCT
 config CLKSRC_SAMSUNG_PWM
 	bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	help
 	  This is a new clocksource driver for the PWM timer found in
 	  Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
@@ -257,12 +265,14 @@ config CLKSRC_SAMSUNG_PWM
 config FSL_FTM_TIMER
 	bool "Freescale FlexTimer Module driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	select CLKSRC_MMIO
 	help
 	  Support for Freescale FlexTimer Module (FTM) timer.
 
 config VF_PIT_TIMER
 	bool
+	select CLKSRC_MMIO
 	help
 	  Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
 
@@ -360,6 +370,7 @@ config CLKSRC_TANGO_XTAL
 config CLKSRC_PXA
 	bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	select CLKSRC_MMIO
 	help
 	  This enables OST0 support available on PXA and SA-11x0
@@ -394,6 +405,7 @@ config CLKSRC_ST_LPC
 	bool "Low power clocksource found in the LPC" if COMPILE_TEST
 	select CLKSRC_OF if OF
 	depends on HAS_IOMEM
+	select CLKSRC_MMIO
 	help
 	  Enable this option to use the Low Power controller timer
 	  as clocksource.

+ 2 - 1
drivers/clocksource/tcb_clksrc.c

@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
 
 	__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
 	__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-	clk_disable(tcd->clk);
+	if (!clockevent_state_detached(d))
+		clk_disable(tcd->clk);
 
 	return 0;
 }

+ 7 - 8
drivers/cpufreq/cpufreq-dt.c

@@ -142,15 +142,16 @@ static int allocate_resources(int cpu, struct device **cdev,
 
 try_again:
 	cpu_reg = regulator_get_optional(cpu_dev, reg);
-	if (IS_ERR(cpu_reg)) {
+	ret = PTR_ERR_OR_ZERO(cpu_reg);
+	if (ret) {
 		/*
 		 * If cpu's regulator supply node is present, but regulator is
 		 * not yet registered, we should try deferring probe.
 		 */
-		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+		if (ret == -EPROBE_DEFER) {
 			dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
 				cpu);
-			return -EPROBE_DEFER;
+			return ret;
 		}
 
 		/* Try with "cpu-supply" */
@@ -159,18 +160,16 @@ try_again:
 			goto try_again;
 		}
 
-		dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
-			cpu, PTR_ERR(cpu_reg));
+		dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
 	}
 
 	cpu_clk = clk_get(cpu_dev, NULL);
-	if (IS_ERR(cpu_clk)) {
+	ret = PTR_ERR_OR_ZERO(cpu_clk);
+	if (ret) {
 		/* put regulator */
 		if (!IS_ERR(cpu_reg))
 			regulator_put(cpu_reg);
 
-		ret = PTR_ERR(cpu_clk);
-
 		/*
 		 * If cpu's clk node is present, but clock is not yet
 		 * registered, we should try deferring probe.

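The cpufreq-dt rewrite above leans on PTR_ERR_OR_ZERO(): one call turns an error-encoding pointer into either 0 or its negative errno, so ret can be tested directly. Below is a simplified userspace model of the kernel's ERR_PTR machinery, only to show the encoding; MAX_ERRNO and EPROBE_DEFER are redefined locally for the sketch and the pointer casts assume a mainstream flat address space.

#include <stdio.h>

#define MAX_ERRNO	4095
#define EPROBE_DEFER	517	/* kernel-internal errno, redefined here */

static void *ERR_PTR(long error)
{
	return (void *)error;
}

static long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static int IS_ERR(const void *ptr)
{
	/* The top MAX_ERRNO addresses are reserved for encoded errors. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR(ptr) ? (int)PTR_ERR(ptr) : 0;
}

int main(void)
{
	void *good = &(int){ 0 };		/* an ordinary pointer */
	void *bad = ERR_PTR(-EPROBE_DEFER);	/* an encoded error */

	printf("good -> %d, bad -> %d\n",
	       PTR_ERR_OR_ZERO(good), PTR_ERR_OR_ZERO(bad));
	return 0;
}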
+ 3 - 3
drivers/cpufreq/cpufreq.c

@@ -48,11 +48,11 @@ static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
 					  bool active)
 {
 	do {
-		policy = list_next_entry(policy, policy_list);
-
 		/* No more policies in the list */
-		if (&policy->policy_list == &cpufreq_policy_list)
+		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
 			return NULL;
+
+		policy = list_next_entry(policy, policy_list);
 	} while (!suitable_policy(policy, active));
 
 	return policy;

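The next_policy() fix is an order-of-operations change: test list_is_last() before stepping, so the list head is never treated as a policy entry. A self-contained miniature in the style of <linux/list.h>, with all structures invented for the example:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct policy {
	int cpu;
	struct list_head node;
};

static int list_is_last(const struct list_head *e, const struct list_head *h)
{
	return e->next == h;
}

static struct policy *next_policy(struct policy *p, struct list_head *head)
{
	if (list_is_last(&p->node, head))	/* check before stepping */
		return NULL;
	return container_of(p->node.next, struct policy, node);
}

int main(void)
{
	struct list_head head;
	struct policy a = { .cpu = 0 }, b = { .cpu = 1 };

	/* circular list: head -> a -> b -> head */
	head.next = &a.node; a.node.next = &b.node; b.node.next = &head;
	b.node.prev = &a.node; a.node.prev = &head; head.prev = &b.node;

	for (struct policy *p = container_of(head.next, struct policy, node);
	     p; p = next_policy(p, &head))
		printf("policy for cpu%d\n", p->cpu);
	return 0;
}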
+ 8 - 3
drivers/cpufreq/cpufreq_governor.c

@@ -387,16 +387,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
 	if (!have_governor_per_policy())
 		cdata->gdbs_data = dbs_data;
 
+	policy->governor_data = dbs_data;
+
 	ret = sysfs_create_group(get_governor_parent_kobj(policy),
 				 get_sysfs_attr(dbs_data));
 	if (ret)
 		goto reset_gdbs_data;
 
-	policy->governor_data = dbs_data;
-
 	return 0;
 
 reset_gdbs_data:
+	policy->governor_data = NULL;
+
 	if (!have_governor_per_policy())
 		cdata->gdbs_data = NULL;
 	cdata->exit(dbs_data, !policy->governor->initialized);
@@ -417,16 +419,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
 	if (!cdbs->shared || cdbs->shared->policy)
 		return -EBUSY;
 
-	policy->governor_data = NULL;
 	if (!--dbs_data->usage_count) {
 		sysfs_remove_group(get_governor_parent_kobj(policy),
 				   get_sysfs_attr(dbs_data));
 
+		policy->governor_data = NULL;
+
 		if (!have_governor_per_policy())
 			cdata->gdbs_data = NULL;
 
 		cdata->exit(dbs_data, policy->governor->initialized == 1);
 		kfree(dbs_data);
+	} else {
+		policy->governor_data = NULL;
 	}
 
 	free_common_dbs_info(policy, cdata);

+ 1 - 1
drivers/cpufreq/pxa2xx-cpufreq.c

@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
 	}
 }
 #else
-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
 {
 	return 0;
 }

+ 0 - 1
drivers/cpuidle/coupled.c

@@ -119,7 +119,6 @@ struct cpuidle_coupled {
 
 #define CPUIDLE_COUPLED_NOT_IDLE	(-1)
 
-static DEFINE_MUTEX(cpuidle_coupled_lock);
 static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
 
 /*

+ 1 - 1
drivers/cpuidle/cpuidle.c

@@ -153,7 +153,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * be frozen safely.
 	 */
 	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
-	if (index >= 0)
+	if (index > 0)
 		enter_freeze_proper(drv, dev, index);
 
 	return index;

Энэ ялгаанд хэт олон файл өөрчлөгдсөн тул зарим файлыг харуулаагүй болно