Merge branch 'linus' into locking/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar, 8 years ago
commit 02cb689b2c
100 changed files with 1559 additions and 397 deletions
  1. Documentation/ABI/testing/sysfs-devices-system-ibm-rtl (+2 -2)
  2. Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt (+2 -2)
  3. Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt (+5 -0)
  4. Documentation/devicetree/bindings/pci/rockchip-pcie.txt (+8 -3)
  5. Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt (+5 -5)
  6. Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt (+1 -1)
  7. Documentation/filesystems/Locking (+0 -1)
  8. Documentation/filesystems/vfs.txt (+0 -1)
  9. Documentation/i2c/i2c-topology (+2 -2)
  10. Documentation/networking/dsa/dsa.txt (+2 -1)
  11. Documentation/virtual/kvm/api.txt (+11 -0)
  12. MAINTAINERS (+3 -1)
  13. Makefile (+10 -7)
  14. arch/arc/Makefile (+6 -1)
  15. arch/arc/boot/dts/axc001.dtsi (+1 -1)
  16. arch/arc/boot/dts/nsim_700.dts (+1 -1)
  17. arch/arc/boot/dts/nsimosci.dts (+4 -0)
  18. arch/arc/configs/nsim_700_defconfig (+1 -0)
  19. arch/arc/configs/nsim_hs_defconfig (+1 -0)
  20. arch/arc/configs/nsim_hs_smp_defconfig (+1 -0)
  21. arch/arc/configs/nsimosci_defconfig (+1 -0)
  22. arch/arc/configs/nsimosci_hs_defconfig (+1 -0)
  23. arch/arc/configs/nsimosci_hs_smp_defconfig (+1 -2)
  24. arch/arc/include/asm/arcregs.h (+2 -0)
  25. arch/arc/include/asm/smp.h (+2 -2)
  26. arch/arc/kernel/devtree.c (+2 -0)
  27. arch/arc/kernel/mcip.c (+20 -12)
  28. arch/arc/kernel/process.c (+11 -9)
  29. arch/arc/kernel/smp.c (+15 -8)
  30. arch/arc/kernel/time.c (+11 -8)
  31. arch/arc/mm/dma.c (+26 -0)
  32. arch/arc/plat-eznps/smp.c (+0 -6)
  33. arch/arm/boot/dts/imx53-qsb.dts (+7 -7)
  34. arch/arm/boot/dts/logicpd-som-lv.dtsi (+5 -0)
  35. arch/arm/boot/dts/logicpd-torpedo-som.dtsi (+2 -2)
  36. arch/arm/boot/dts/omap5-board-common.dtsi (+4 -3)
  37. arch/arm/boot/dts/stih410-b2260.dts (+1 -1)
  38. arch/arm/boot/dts/sun8i-a23-a33.dtsi (+4 -0)
  39. arch/arm/include/asm/kvm_asm.h (+1 -0)
  40. arch/arm/include/asm/kvm_host.h (+3 -0)
  41. arch/arm/include/asm/kvm_hyp.h (+1 -0)
  42. arch/arm/kernel/traps.c (+20 -0)
  43. arch/arm/kernel/vmlinux-xip.lds.S (+5 -0)
  44. arch/arm/kvm/arm.c (+26 -1)
  45. arch/arm/kvm/hyp/tlb.c (+15 -0)
  46. arch/arm/lib/backtrace.S (+3 -34)
  47. arch/arm/mach-omap2/Kconfig (+1 -0)
  48. arch/arm/mach-omap2/id.c (+11 -5)
  49. arch/arm/mach-omap2/prm3xxx.c (+3 -0)
  50. arch/arm/mach-omap2/voltage.c (+6 -0)
  51. arch/arm/mm/dma-mapping.c (+1 -1)
  52. arch/arm/mm/proc-v7m.S (+1 -1)
  53. arch/arm64/boot/dts/marvell/armada-37xx.dtsi (+2 -2)
  54. arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi (+3 -3)
  55. arch/arm64/boot/dts/rockchip/rk3399.dtsi (+5 -2)
  56. arch/arm64/include/asm/kvm_asm.h (+1 -0)
  57. arch/arm64/include/asm/kvm_host.h (+3 -0)
  58. arch/arm64/include/asm/kvm_mmu.h (+1 -1)
  59. arch/arm64/include/asm/perf_event.h (+9 -1)
  60. arch/arm64/kernel/perf_event.c (+1 -9)
  61. arch/arm64/kvm/hyp/tlb.c (+15 -0)
  62. arch/arm64/kvm/sys_regs.c (+8 -2)
  63. arch/nios2/kernel/time.c (+1 -0)
  64. arch/powerpc/include/asm/exception-64s.h (+12 -3)
  65. arch/powerpc/include/asm/ppc-opcode.h (+1 -0)
  66. arch/powerpc/kernel/exceptions-64s.S (+8 -3)
  67. arch/powerpc/kernel/process.c (+21 -21)
  68. arch/powerpc/kernel/setup_64.c (+14 -6)
  69. arch/powerpc/mm/hash_utils_64.c (+4 -0)
  70. arch/powerpc/mm/pgtable-radix.c (+4 -0)
  71. arch/powerpc/mm/tlb-radix.c (+4 -0)
  72. arch/s390/kernel/vmlinux.lds.S (+2 -0)
  73. arch/s390/pci/pci_dma.c (+1 -1)
  74. arch/sparc/Kconfig (+23 -0)
  75. arch/sparc/include/asm/hypervisor.h (+343 -0)
  76. arch/sparc/include/asm/iommu_64.h (+28 -0)
  77. arch/sparc/kernel/hvapi.c (+1 -0)
  78. arch/sparc/kernel/iommu.c (+6 -2)
  79. arch/sparc/kernel/iommu_common.h (+0 -1)
  80. arch/sparc/kernel/pci_sun4v.c (+360 -58)
  81. arch/sparc/kernel/pci_sun4v.h (+21 -0)
  82. arch/sparc/kernel/pci_sun4v_asm.S (+68 -0)
  83. arch/sparc/kernel/signal_32.c (+2 -2)
  84. arch/sparc/mm/init_64.c (+64 -7)
  85. arch/tile/include/asm/cache.h (+3 -0)
  86. arch/x86/crypto/aesni-intel_glue.c (+2 -2)
  87. arch/x86/events/intel/uncore_snb.c (+28 -4)
  88. arch/x86/include/asm/intel-mid.h (+1 -0)
  89. arch/x86/kernel/apm_32.c (+4 -1)
  90. arch/x86/kernel/cpu/amd.c (+1 -5)
  91. arch/x86/kernel/cpu/common.c (+30 -2)
  92. arch/x86/kvm/irq_comm.c (+27 -31)
  93. arch/x86/kvm/x86.c (+34 -13)
  94. arch/x86/platform/efi/efi.c (+1 -1)
  95. arch/x86/platform/efi/efi_64.c (+57 -23)
  96. arch/x86/platform/intel-mid/pwr.c (+19 -0)
  97. arch/x86/purgatory/Makefile (+1 -0)
  98. arch/xtensa/include/uapi/asm/unistd.h (+8 -1)
  99. arch/xtensa/kernel/time.c (+7 -7)
  100. arch/xtensa/kernel/traps.c (+22 -52)

+ 2 - 2
Documentation/ABI/testing/sysfs-devices-system-ibm-rtl

@@ -1,4 +1,4 @@
-What:           state
+What:           /sys/devices/system/ibm_rtl/state
 Date:           Sep 2010
 KernelVersion:  2.6.37
 Contact:        Vernon Mauery <vernux@us.ibm.com>
@@ -10,7 +10,7 @@ Description:    The state file allows a means by which to change in and
 Users:          The ibm-prtm userspace daemon uses this interface.
 
 
-What:           version
+What:           /sys/devices/system/ibm_rtl/version
 Date:           Sep 2010
 KernelVersion:  2.6.37
 Contact:        Vernon Mauery <vernux@us.ibm.com>

+ 2 - 2
Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt → Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt

@@ -6,7 +6,7 @@ perform in-band IPMI communication with their host.
 
 Required properties:
 
-- compatible : should be "aspeed,ast2400-bt-bmc"
+- compatible : should be "aspeed,ast2400-ibt-bmc"
 - reg: physical address and size of the registers
 
 Optional properties:
@@ -17,7 +17,7 @@ Optional properties:
 Example:
 
 	ibt@1e789140 {
-		compatible = "aspeed,ast2400-bt-bmc";
+		compatible = "aspeed,ast2400-ibt-bmc";
 		reg = <0x1e789140 0x18>;
 		interrupts = <8>;
 	};

+ 5 - 0
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt

@@ -43,6 +43,9 @@ Optional properties:
   reset signal present internally in some host controller IC designs.
   See Documentation/devicetree/bindings/reset/reset.txt for details.
 
+* reset-names: request name for using "resets" property. Must be "reset".
+	(It will be used together with "resets" property.)
+
 * clocks: from common clock binding: handle to biu and ciu clocks for the
   bus interface unit clock and the card interface unit clock.
 
@@ -103,6 +106,8 @@ board specific portions as listed below.
 		interrupts = <0 75 0>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+		resets = <&rst 20>;
+		reset-names = "reset";
 	};
 
 [board specific internal DMA resources]

+ 8 - 3
Documentation/devicetree/bindings/pci/rockchip-pcie.txt

@@ -26,13 +26,16 @@ Required properties:
 	- "sys"
 	- "sys"
 	- "legacy"
 	- "legacy"
 	- "client"
 	- "client"
-- resets: Must contain five entries for each entry in reset-names.
+- resets: Must contain seven entries for each entry in reset-names.
 	   See ../reset/reset.txt for details.
 	   See ../reset/reset.txt for details.
 - reset-names: Must include the following names
 - reset-names: Must include the following names
 	- "core"
 	- "core"
 	- "mgmt"
 	- "mgmt"
 	- "mgmt-sticky"
 	- "mgmt-sticky"
 	- "pipe"
 	- "pipe"
+	- "pm"
+	- "aclk"
+	- "pclk"
 - pinctrl-names : The pin control state names
 - pinctrl-names : The pin control state names
 - pinctrl-0: The "default" pinctrl state
 - pinctrl-0: The "default" pinctrl state
 - #interrupt-cells: specifies the number of cells needed to encode an
 - #interrupt-cells: specifies the number of cells needed to encode an
@@ -86,8 +89,10 @@ pcie0: pcie@f8000000 {
 	reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
 	reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
 	reg-names = "axi-base", "apb-base";
 	reg-names = "axi-base", "apb-base";
 	resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
 	resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-	reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> ,
+		 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+	reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+		      "pm", "pclk", "aclk";
 	phys = <&pcie_phy>;
 	phys = <&pcie_phy>;
 	phy-names = "pcie-phy";
 	phy-names = "pcie-phy";
 	pinctrl-names = "default";
 	pinctrl-names = "default";

+ 5 - 5
Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt

@@ -14,11 +14,6 @@ Required properies:
  - #size-cells	: The value of this property must be 1
  - ranges	: defines mapping between pin controller node (parent) to
    gpio-bank node (children).
- - interrupt-parent: phandle of the interrupt parent to which the external
-   GPIO interrupts are forwarded to.
- - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
-   which includes IRQ mux selection register, and the offset of the IRQ mux
-   selection register.
  - pins-are-numbered: Specify the subnodes are using numbered pinmux to
    specify pins.
 
@@ -37,6 +32,11 @@ Required properties:
 
 Optional properties:
  - reset:	  : Reference to the reset controller
+ - interrupt-parent: phandle of the interrupt parent to which the external
+   GPIO interrupts are forwarded to.
+ - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
+   which includes IRQ mux selection register, and the offset of the IRQ mux
+   selection register.
 
Example:
#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>

+ 1 - 1
Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt

@@ -12,7 +12,7 @@ Required properties:
 
 Optional properties:
 - ti,dmic: phandle for the OMAP dmic node if the machine have it connected
-- ti,jack_detection: Need to be present if the board capable to detect jack
+- ti,jack-detection: Need to be present if the board capable to detect jack
   insertion, removal.
 
 Available audio endpoints for the audio-routing table:

+ 0 - 1
Documentation/filesystems/Locking

@@ -447,7 +447,6 @@ prototypes:
 	int (*flush) (struct file *);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,

+ 0 - 1
Documentation/filesystems/vfs.txt

@@ -828,7 +828,6 @@ struct file_operations {
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t, loff_t, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);

+ 2 - 2
Documentation/i2c/i2c-topology

@@ -326,7 +326,7 @@ Two parent-locked sibling muxes
 
 This is a good topology.
 
-                                   .--------.
+                                    .--------.
                    .----------.  .--| dev D1 |
                    |  parent- |--'  '--------'
                 .--|  locked  |     .--------.
@@ -350,7 +350,7 @@ Mux-locked and parent-locked sibling muxes
 
 This is a good topology.
 
-                                   .--------.
+                                    .--------.
                    .----------.  .--| dev D1 |
                    |   mux-   |--'  '--------'
                 .--|  locked  |     .--------.

+ 2 - 1
Documentation/networking/dsa/dsa.txt

@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and
 Switch tagging protocols
 ------------------------
 
-DSA currently supports 4 different tagging protocols, and a tag-less mode as
+DSA currently supports 5 different tagging protocols, and a tag-less mode as
 well. The different protocols are implemented in:
 
 net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
 net/dsa/tag_dsa.c: Marvell's original DSA tag
 net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
 net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
+net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
 
 The exact format of the tag protocol is vendor specific, but in general, they
 all contain something which:

+ 11 - 0
Documentation/virtual/kvm/api.txt

@@ -777,6 +777,17 @@ Gets the current timestamp of kvmclock as seen by the current guest. In
 conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
 such as migration.
 
+When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the
+set of bits that KVM can return in struct kvm_clock_data's flag member.
+
+The only flag defined now is KVM_CLOCK_TSC_STABLE.  If set, the returned
+value is the exact kvmclock value seen by all VCPUs at the instant
+when KVM_GET_CLOCK was called.  If clear, the returned value is simply
+CLOCK_MONOTONIC plus a constant offset; the offset can be modified
+with KVM_SET_CLOCK.  KVM will try to make all VCPUs follow this clock,
+but the exact value read by each VCPU could differ, because the host
+TSC is not stable.
+
 struct kvm_clock_data {
 	__u64 clock;  /* kvmclock current value */
 	__u32 flags;
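
A minimal userspace sketch of the API described above, assuming a VM fd
obtained via KVM_CREATE_VM and kernel headers that already define
KVM_CLOCK_TSC_STABLE; error handling is abbreviated:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	/* vm_fd: file descriptor returned by the KVM_CREATE_VM ioctl (assumed). */
	static void show_kvmclock(int vm_fd)
	{
		struct kvm_clock_data data = { 0 };
		int caps = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK);

		if (ioctl(vm_fd, KVM_GET_CLOCK, &data) < 0) {
			perror("KVM_GET_CLOCK");
			return;
		}

		/* With KVM_CLOCK_TSC_STABLE, 'clock' is the exact value all
		 * VCPUs saw at the instant of the call; otherwise it is only
		 * CLOCK_MONOTONIC plus a KVM_SET_CLOCK-adjustable offset. */
		printf("kvmclock: %llu ns (tsc-stable: %s)\n",
		       (unsigned long long)data.clock,
		       (caps & KVM_CLOCK_TSC_STABLE) ? "yes" : "no");
	}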

+ 3 - 1
MAINTAINERS

@@ -7084,6 +7084,7 @@ F:	drivers/scsi/53c700*
 LED SUBSYSTEM
 M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Jacek Anaszewski <j.anaszewski@samsung.com>
+M:	Pavel Machek <pavel@ucw.cz>
 L:	linux-leds@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
 S:	Maintained
@@ -8057,6 +8058,7 @@ F:	drivers/infiniband/hw/mlx4/
 F:	include/linux/mlx4/
 
 MELLANOX MLX5 core VPI driver
+M:	Saeed Mahameed <saeedm@mellanox.com>
 M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	netdev@vger.kernel.org
@@ -9335,7 +9337,7 @@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
 M:	Keith Busch <keith.busch@intel.com>
 L:	linux-pci@vger.kernel.org
 S:	Supported
-F:	arch/x86/pci/vmd.c
+F:	drivers/pci/host/vmd.c
 
 PCIE DRIVER FOR ST SPEAR13XX
 M:	Pratyush Anand <pratyush.anand@gmail.com>

+ 10 - 7
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -370,7 +370,7 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 LDFLAGS_vmlinux =
-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
 CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
 
 
@@ -399,11 +399,12 @@ KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
-		   -std=gnu89
+		   -std=gnu89 $(call cc-option,-fno-PIE)
+
 
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS   := -D__ASSEMBLY__
+KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
@@ -620,7 +621,6 @@ ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
-KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning,frame-address,)
 
 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
@@ -629,15 +629,18 @@ KBUILD_CFLAGS	+= $(call cc-option,-fdata-sections,)
 endif
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= -Os
+KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS	+= -O2
+KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,)
 else
 KBUILD_CFLAGS   += -O2
 endif
 endif
 
+KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
+			$(call cc-disable-warning,maybe-uninitialized,))
+
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
 

+ 6 - 1
arch/arc/Makefile

@@ -50,6 +50,9 @@ atleast_gcc44 :=  $(call cc-ifversion, -ge, 0404, y)
 
 cflags-$(atleast_gcc44)			+= -fsection-anchors
 
+cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
+
 ifdef CONFIG_ISA_ARCV2
 
 ifndef CONFIG_ARC_HAS_LL64
@@ -68,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables $(cfi)
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
 # Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
 endif
 
 # small data is default for elf32 tool-chain. If not usable, disable it

+ 1 - 1
arch/arc/boot/dts/axc001.dtsi

@@ -71,7 +71,7 @@
 			reg-io-width = <4>;
 		};
 
-		arcpmu0: pmu {
+		arcpct0: pct {
 			compatible = "snps,arc700-pct";
 		};
 	};

+ 1 - 1
arch/arc/boot/dts/nsim_700.dts

@@ -69,7 +69,7 @@
 			};
 		};
 
-		arcpmu0: pmu {
+		arcpct0: pct {
 			compatible = "snps,arc700-pct";
 		};
 	};

+ 4 - 0
arch/arc/boot/dts/nsimosci.dts

@@ -83,5 +83,9 @@
 			reg = <0xf0003000 0x44>;
 			interrupts = <7>;
 		};
+
+		arcpct0: pct {
+			compatible = "snps,arc700-pct";
+		};
 	};
 };

+ 1 - 0
arch/arc/configs/nsim_700_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsim_hs_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsim_hs_smp_defconfig

@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsimosci_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsimosci_hs_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 2
arch/arc/configs/nsimosci_hs_smp_defconfig

@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
@@ -34,7 +35,6 @@ CONFIG_INET=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HWMON is not set
 CONFIG_DRM=y
 CONFIG_DRM_ARCPGU=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set

+ 2 - 0
arch/arc/include/asm/arcregs.h

@@ -43,12 +43,14 @@
 #define STATUS_AE_BIT		5	/* Exception active */
 #define STATUS_DE_BIT		6	/* PC is in delay slot */
 #define STATUS_U_BIT		7	/* User/Kernel mode */
+#define STATUS_Z_BIT            11
 #define STATUS_L_BIT		12	/* Loop inhibit */
 
 /* These masks correspond to the status word(STATUS_32) bits */
 #define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
 #define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
 #define STATUS_U_MASK		(1<<STATUS_U_BIT)
+#define STATUS_Z_MASK		(1<<STATUS_Z_BIT)
 #define STATUS_L_MASK		(1<<STATUS_L_BIT)
 
 /*

+ 2 - 2
arch/arc/include/asm/smp.h

@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
  * API expected BY platform smp code (FROM arch smp code)
  *
  * smp_ipi_irq_setup:
- *	Takes @cpu and @irq to which the arch-common ISR is hooked up
+ *	Takes @cpu and @hwirq to which the arch-common ISR is hooked up
  */
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
 
 /*
  * struct plat_smp_ops	- SMP callbacks provided by platform to ARC SMP

+ 2 - 0
arch/arc/kernel/devtree.c

@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
 		arc_base_baud = 166666666;	/* Fixed 166.6MHz clk (TB10x) */
 	else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
 		arc_base_baud = 33333333;	/* Fixed 33MHz clk (AXS10x) */
+	else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
+		arc_base_baud = 800000000;      /* Fixed 800MHz clk (NPS) */
 	else
 		arc_base_baud = 50000000;	/* Fixed default 50MHz */
 }

+ 20 - 12
arch/arc/kernel/mcip.c

@@ -181,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 {
 	unsigned long flags;
 	cpumask_t online;
+	unsigned int destination_bits;
+	unsigned int distribution_mode;
 
 	/* errout if no online cpu per @cpumask */
 	if (!cpumask_and(&online, cpumask, cpu_online_mask))
@@ -188,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 	raw_spin_lock_irqsave(&mcip_lock, flags);
 
-	idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
-	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+	destination_bits = cpumask_bits(&online)[0];
+	idu_set_dest(data->hwirq, destination_bits);
+
+	if (ffs(destination_bits) == fls(destination_bits))
+		distribution_mode = IDU_M_DISTRI_DEST;
+	else
+		distribution_mode = IDU_M_DISTRI_RR;
+
+	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
 
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 
@@ -207,16 +216,15 @@ static struct irq_chip idu_irq_chip = {
 
 };
 
-static int idu_first_irq;
+static irq_hw_number_t idu_first_hwirq;
 
 static void idu_cascade_isr(struct irq_desc *desc)
 {
-	struct irq_domain *domain = irq_desc_get_handler_data(desc);
-	unsigned int core_irq = irq_desc_get_irq(desc);
-	unsigned int idu_irq;
+	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
+	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
-	idu_irq = core_irq - idu_first_irq;
-	generic_handle_irq(irq_find_mapping(domain, idu_irq));
+	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -282,7 +290,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 	struct irq_domain *domain;
 	/* Read IDU BCR to confirm nr_irqs */
 	int nr_irqs = of_irq_count(intc);
-	int i, irq;
+	int i, virq;
 	struct mcip_bcr mp;
 
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -303,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 		 * however we need it to get the parent virq and set IDU handler
 		 * as first level isr
 		 */
-		irq = irq_of_parse_and_map(intc, i);
+		virq = irq_of_parse_and_map(intc, i);
 		if (!i)
-			idu_first_irq = irq;
+			idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
 
-		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
 	}
 
 	__mcip_cmd(CMD_IDU_ENABLE, 0);
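
A side note on the idu_irq_set_affinity() hunk above: a word with exactly one
set bit has its lowest and highest set bits at the same index, so the
ffs() == fls() comparison is a cheap test for a single-CPU destination before
falling back to round-robin distribution. A hypothetical standalone
illustration in plain C, using GCC builtins in place of the kernel helpers:

	#include <stdio.h>

	/* Userspace stand-ins for the kernel's ffs()/fls(): 1-based bit
	 * indices, 0 when no bit is set. */
	static int my_ffs(unsigned int x) { return x ? __builtin_ffs(x) : 0; }
	static int my_fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

	int main(void)
	{
		unsigned int masks[] = { 0x4 /* CPU 2 only */, 0x6 /* CPUs 1 and 2 */ };

		for (int i = 0; i < 2; i++)
			printf("0x%x: ffs=%d fls=%d -> %s\n", masks[i],
			       my_ffs(masks[i]), my_fls(masks[i]),
			       my_ffs(masks[i]) == my_fls(masks[i]) ?
			       "single dest (IDU_M_DISTRI_DEST)" :
			       "round-robin (IDU_M_DISTRI_RR)");
		return 0;
	}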

+ 11 - 9
arch/arc/kernel/process.c

@@ -43,8 +43,8 @@ SYSCALL_DEFINE0(arc_gettls)
 
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
-	int uval;
-	int ret;
+	struct pt_regs *regs = current_pt_regs();
+	int uval = -EFAULT;
 
 	/*
 	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -54,24 +54,26 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
 
+	/* Z indicates to userspace if operation succeded */
+	regs->status32 &= ~STATUS_Z_MASK;
+
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
 	preempt_disable();
 
-	ret = __get_user(uval, uaddr);
-	if (ret)
+	if (__get_user(uval, uaddr))
 		goto done;
 
-	if (uval != expected)
-		ret = -EAGAIN;
-	else
-		ret = __put_user(new, uaddr);
+	if (uval == expected) {
+		if (!__put_user(new, uaddr))
+			regs->status32 |= STATUS_Z_MASK;
+	}
 
 done:
 	preempt_enable();
 
-	return ret;
+	return uval;
 }
 
 void arch_cpu_idle(void)
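
For context, the new calling convention above returns the value read back from
*uaddr and reports success through the Z flag; because a successful swap reads
back 'expected', userspace can also simply compare the return value. A
hypothetical sketch of a caller (the __NR_arc_usr_cmpxchg syscall number and
the retry policy are assumptions for illustration, not part of this diff):

	#include <unistd.h>
	#include <sys/syscall.h>

	/* Returns the value observed at *uaddr; the swap happened iff the
	 * observed value equals 'expected'. */
	static long arc_cmpxchg(int *uaddr, int expected, int newval)
	{
		return syscall(__NR_arc_usr_cmpxchg, uaddr, expected, newval);
	}

	/* Toy lock acquire: spin until we atomically flip 0 -> 1. */
	static void toy_lock(int *lock)
	{
		while (arc_cmpxchg(lock, 0, 1) != 0)
			;	/* held by someone else (or fault); retry */
	}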

+ 15 - 8
arch/arc/kernel/smp.c

@@ -22,6 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
 #include <linux/reboot.h>
+#include <linux/irqdomain.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/mach_desc.h>
@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	int i;
 
 	/*
-	 * Initialise the present map, which describes the set of CPUs
-	 * actually populated at the present time.
+	 * if platform didn't set the present map already, do it now
+	 * boot cpu is set to present already by init/main.c
 	 */
-	for (i = 0; i < max_cpus; i++)
-		set_cpu_present(i, true);
+	if (num_present_cpus() <= 1) {
+		for (i = 0; i < max_cpus; i++)
+			set_cpu_present(i, true);
+	}
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  */
 static DEFINE_PER_CPU(int, ipi_dev);
 
-int smp_ipi_irq_setup(int cpu, int irq)
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
 {
 	int *dev = per_cpu_ptr(&ipi_dev, cpu);
+	unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+	if (!virq)
+		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
 
 	/* Boot cpu calls request, all call enable */
 	if (!cpu) {
 		int rc;
 
-		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
 		if (rc)
-			panic("Percpu IRQ request failed for %d\n", irq);
+			panic("Percpu IRQ request failed for %u\n", virq);
 	}
 
-	enable_percpu_irq(irq, 0);
+	enable_percpu_irq(virq, 0);
 
 	return 0;
 }

+ 11 - 8
arch/arc/kernel/time.c

@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
 		cycle_t  full;
 	} stamp;
 
-
-	__asm__ __volatile(
-	"1:						\n"
-	"	lr		%0, [AUX_RTC_LOW]	\n"
-	"	lr		%1, [AUX_RTC_HIGH]	\n"
-	"	lr		%2, [AUX_RTC_CTRL]	\n"
-	"	bbit0.nt	%2, 31, 1b		\n"
-	: "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+	/*
+	 * hardware has an internal state machine which tracks readout of
+	 * low/high and updates the CTRL.status if
+	 *  - interrupt/exception taken between the two reads
+	 *  - high increments after low has been read
+	 */
+	do {
+		stamp.low = read_aux_reg(AUX_RTC_LOW);
+		stamp.high = read_aux_reg(AUX_RTC_HIGH);
+		status = read_aux_reg(AUX_RTC_CTRL);
+	} while (!(status & _BITUL(31)));
 
 	return stamp.full;
 }

+ 26 - 0
arch/arc/mm/dma.c

@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }
 
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			unsigned long attrs)
+{
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+	unsigned long off = vma->vm_pgoff;
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 /*
  * streaming DMA Mapping API...
  * CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
 struct dma_map_ops arc_dma_ops = {
 	.alloc			= arc_dma_alloc,
 	.free			= arc_dma_free,
+	.mmap			= arc_dma_mmap,
 	.map_page		= arc_dma_map_page,
 	.map_sg			= arc_dma_map_sg,
 	.sync_single_for_device	= arc_dma_sync_single_for_device,

+ 0 - 6
arch/arc/plat-eznps/smp.c

@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
 	mtm_enable_core(cpu);
 }
 
-static void eznps_ipi_clear(int irq)
-{
-	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
-}
-
 struct plat_smp_ops plat_smp_ops = {
 	.info		= smp_cpuinfo_buf,
 	.init_early_smp	= eznps_init_cpumasks,
 	.cpu_kick	= eznps_smp_wakeup_cpu,
 	.ipi_send	= eznps_ipi_send,
 	.init_per_cpu	= eznps_init_per_cpu,
-	.ipi_clear	= eznps_ipi_clear,
 };

+ 7 - 7
arch/arm/boot/dts/imx53-qsb.dts

@@ -64,8 +64,8 @@
 			};
 
 			ldo3_reg: ldo3 {
-				regulator-min-microvolt = <600000>;
-				regulator-max-microvolt = <1800000>;
+				regulator-min-microvolt = <1725000>;
+				regulator-max-microvolt = <3300000>;
 				regulator-always-on;
 			};
 
@@ -76,8 +76,8 @@
 			};
 
 			ldo5_reg: ldo5 {
-				regulator-min-microvolt = <1725000>;
-				regulator-max-microvolt = <3300000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 			};
 
@@ -100,14 +100,14 @@
 			};
 
 			ldo9_reg: ldo9 {
-				regulator-min-microvolt = <1200000>;
+				regulator-min-microvolt = <1250000>;
 				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 			};
 
 			ldo10_reg: ldo10 {
-				regulator-min-microvolt = <1250000>;
-				regulator-max-microvolt = <3650000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 			};
 		};

+ 5 - 0
arch/arm/boot/dts/logicpd-som-lv.dtsi

@@ -13,6 +13,11 @@
 		};
 	};
 
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x80000000 0>;
+	};
+
 	wl12xx_vmmc: wl12xx_vmmc {
 		compatible = "regulator-fixed";
 		regulator-name = "vwl1271";

+ 2 - 2
arch/arm/boot/dts/logicpd-torpedo-som.dtsi

@@ -13,9 +13,9 @@
 		};
 	};
 
-	memory@0 {
+	memory@80000000 {
 		device_type = "memory";
-		reg = <0 0>;
+		reg = <0x80000000 0>;
 	};
 
 	leds {

+ 4 - 3
arch/arm/boot/dts/omap5-board-common.dtsi

@@ -124,6 +124,7 @@
 		compatible = "ti,abe-twl6040";
 		compatible = "ti,abe-twl6040";
 		ti,model = "omap5-uevm";
 		ti,model = "omap5-uevm";
 
 
+		ti,jack-detection;
 		ti,mclk-freq = <19200000>;
 		ti,mclk-freq = <19200000>;
 
 
 		ti,mcpdm = <&mcpdm>;
 		ti,mcpdm = <&mcpdm>;
@@ -415,7 +416,7 @@
 			ti,backup-battery-charge-high-current;
 			ti,backup-battery-charge-high-current;
 		};
 		};
 
 
-		gpadc {
+		gpadc: gpadc {
 			compatible = "ti,palmas-gpadc";
 			compatible = "ti,palmas-gpadc";
 			interrupts = <18 0
 			interrupts = <18 0
 				      16 0
 				      16 0
@@ -475,8 +476,8 @@
 				smps6_reg: smps6 {
 				smps6_reg: smps6 {
 					/* VDD_DDR3 - over VDD_SMPS6 */
 					/* VDD_DDR3 - over VDD_SMPS6 */
 					regulator-name = "smps6";
 					regulator-name = "smps6";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
+					regulator-min-microvolt = <1350000>;
+					regulator-max-microvolt = <1350000>;
 					regulator-always-on;
 					regulator-always-on;
 					regulator-boot-on;
 					regulator-boot-on;
 				};
 				};

+ 1 - 1
arch/arm/boot/dts/stih410-b2260.dts

@@ -74,7 +74,7 @@
 		/* Low speed expansion connector */
 		spi0: spi@9844000 {
 			label = "LS-SPI0";
-			cs-gpio = <&pio30 3 0>;
+			cs-gpios = <&pio30 3 0>;
 			status = "okay";
 		};
 

+ 4 - 0
arch/arm/boot/dts/sun8i-a23-a33.dtsi

@@ -282,11 +282,15 @@
 			uart1_pins_a: uart1@0 {
 				allwinner,pins = "PG6", "PG7";
 				allwinner,function = "uart1";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
 			uart1_pins_cts_rts_a: uart1-cts-rts@0 {
 				allwinner,pins = "PG8", "PG9";
 				allwinner,function = "uart1";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
 			mmc0_pins_a: mmc0@0 {

+ 1 - 0
arch/arm/include/asm/kvm_asm.h

@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 

+ 3 - 0
arch/arm/include/asm/kvm_host.h

@@ -57,6 +57,9 @@ struct kvm_arch {
 	/* VTTBR value associated with below pgd and vmid */
 	u64    vttbr;
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* Timer */
 	struct arch_timer_kvm	timer;
 

+ 1 - 0
arch/arm/include/asm/kvm_hyp.h

@@ -71,6 +71,7 @@
 #define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
 #define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
 #define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL		__ACCESS_CP15(c8, 0, c7, 0)
 #define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
 #define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
 #define NMRR		__ACCESS_CP15(c10, 0, c2, 1)

+ 20 - 0
arch/arm/kernel/traps.c

@@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
 		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
 }
 
+void dump_backtrace_stm(u32 *stack, u32 instruction)
+{
+	char str[80], *p;
+	unsigned int x;
+	int reg;
+
+	for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
+		if (instruction & BIT(reg)) {
+			p += sprintf(p, " r%d:%08x", reg, *stack--);
+			if (++x == 6) {
+				x = 0;
+				p = str;
+				printk("%s\n", str);
+			}
+		}
+	}
+	if (p != str)
+		printk("%s\n", str);
+}
+
 #ifndef CONFIG_ARM_UNWIND
 /*
  * Stack pointers should always be within the kernels view of

+ 5 - 0
arch/arm/kernel/vmlinux-xip.lds.S

@@ -3,6 +3,9 @@
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
  */
 
+/* No __ro_after_init data in the .rodata section - which will always be ro */
+#define RO_AFTER_INIT_DATA
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
@@ -223,6 +226,8 @@ SECTIONS
 		. = ALIGN(PAGE_SIZE);
 		__init_end = .;
 
+		*(.data..ro_after_init)
+
 		NOSAVE_DATA
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)

+ 26 - 1
arch/arm/kvm/arm.c

@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int ret = 0;
+	int ret, cpu;
 
 	if (type)
 		return -EINVAL;
 
+	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+	if (!kvm->arch.last_vcpu_ran)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
 	ret = kvm_alloc_stage2_pgd(kvm);
 	if (ret)
 		goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(kvm);
 out_fail_alloc:
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
 	return ret;
 }
 
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int *last_ran;
+
+	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+	/*
+	 * We might get preempted before the vCPU actually runs, but
+	 * over-invalidation doesn't affect correctness.
+	 */
+	if (*last_ran != vcpu->vcpu_id) {
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		*last_ran = vcpu->vcpu_id;
+	}
+
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 

+ 15 - 0
arch/arm/kvm/hyp/tlb.c

@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	__kvm_tlb_flush_vmid(kvm);
 }
 
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, VTTBR);
+	isb();
+
+	write_sysreg(0, TLBIALL);
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, VTTBR);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	write_sysreg(0, TLBIALLNSNHIS);

+ 3 - 34
arch/arm/lib/backtrace.S

@@ -10,6 +10,7 @@
  * 27/03/03 Ian Molton Clean up CONFIG_CPU
  *
  */
+#include <linux/kern_levels.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 		.text
@@ -83,13 +84,13 @@ for_each_frame:	tst	frame, mask		@ Check for address exceptions
 		teq	r3, r1, lsr #11
 		ldreq	r0, [frame, #-8]	@ get sp
 		subeq	r0, r0, #4		@ point at the last arg
-		bleq	.Ldumpstm		@ dump saved registers
+		bleq	dump_backtrace_stm	@ dump saved registers
 
 1004:		ldr	r1, [sv_pc, #0]		@ if stmfd sp!, {..., fp, ip, lr, pc}
 		ldr	r3, .Ldsi		@ instruction exists,
 		teq	r3, r1, lsr #11
 		subeq	r0, frame, #16
-		bleq	.Ldumpstm		@ dump saved registers
+		bleq	dump_backtrace_stm	@ dump saved registers
 
 		teq	sv_fp, #0		@ zero saved fp means
 		beq	no_frame		@ no further frames
@@ -112,38 +113,6 @@ ENDPROC(c_backtrace)
 		.long	1004b, 1006b
 		.popsection
 
-#define instr r4
-#define reg   r5
-#define stack r6
-
-.Ldumpstm:	stmfd	sp!, {instr, reg, stack, r7, lr}
-		mov	stack, r0
-		mov	instr, r1
-		mov	reg, #10
-		mov	r7, #0
-1:		mov	r3, #1
- ARM(		tst	instr, r3, lsl reg	)
- THUMB(		lsl	r3, reg			)
- THUMB(		tst	instr, r3		)
-		beq	2f
-		add	r7, r7, #1
-		teq	r7, #6
-		moveq	r7, #0
-		adr	r3, .Lcr
-		addne	r3, r3, #1		@ skip newline
-		ldr	r2, [stack], #-4
-		mov	r1, reg
-		adr	r0, .Lfp
-		bl	printk
-2:		subs	reg, reg, #1
-		bpl	1b
-		teq	r7, #0
-		adrne	r0, .Lcr
-		blne	printk
-		ldmfd	sp!, {instr, reg, stack, r7, pc}
-
-.Lfp:		.asciz	" r%d:%08x%s"
-.Lcr:		.asciz	"\n"
 .Lbad:		.asciz	"Backtrace aborted due to bad frame pointer <%p>\n"
 		.align
 .Ldsi:		.word	0xe92dd800 >> 11	@ stmfd sp!, {... fp, ip, lr, pc}

+ 1 - 0
arch/arm/mach-omap2/Kconfig

@@ -71,6 +71,7 @@ config SOC_AM43XX
 	select HAVE_ARM_TWD
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_775420
+	select OMAP_INTERCONNECT
 
 config SOC_DRA7XX
 	bool "TI DRA7XX"

+ 11 - 5
arch/arm/mach-omap2/id.c

@@ -205,11 +205,15 @@ void __init omap2xxx_check_revision(void)
 
 #define OMAP3_SHOW_FEATURE(feat)		\
 	if (omap3_has_ ##feat())		\
-		printk(#feat" ");
+		n += scnprintf(buf + n, sizeof(buf) - n, #feat " ");
 
 static void __init omap3_cpuinfo(void)
 {
 	const char *cpu_name;
+	char buf[64];
+	int n = 0;
+
+	memset(buf, 0, sizeof(buf));
 
 	/*
 	 * OMAP3430 and OMAP3530 are assumed to be same.
@@ -241,10 +245,10 @@ static void __init omap3_cpuinfo(void)
 		cpu_name = "OMAP3503";
 	}
 
-	sprintf(soc_name, "%s", cpu_name);
+	scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name);
 
 	/* Print verbose information */
-	pr_info("%s %s (", soc_name, soc_rev);
+	n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev);
 
 	OMAP3_SHOW_FEATURE(l2cache);
 	OMAP3_SHOW_FEATURE(iva);
@@ -252,8 +256,10 @@ static void __init omap3_cpuinfo(void)
 	OMAP3_SHOW_FEATURE(neon);
 	OMAP3_SHOW_FEATURE(isp);
 	OMAP3_SHOW_FEATURE(192mhz_clk);
-
-	printk(")\n");
+	if (*(buf + n - 1) == ' ')
+		n--;
+	n += scnprintf(buf + n, sizeof(buf) - n, ")\n");
+	pr_info("%s", buf);
 }
 
 #define OMAP3_CHECK_FEATURE(status,feat)				\

+ 3 - 0
arch/arm/mach-omap2/prm3xxx.c

@@ -319,6 +319,9 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
 	if (has_uart4) {
 		en_uart4_mask = OMAP3630_EN_UART4_MASK;
 		grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK;
+	} else {
+		en_uart4_mask = 0;
+		grpsel_uart4_mask = 0;
 	}
 
 	/* Enable wakeups in PER */

+ 6 - 0
arch/arm/mach-omap2/voltage.c

@@ -87,6 +87,12 @@ int voltdm_scale(struct voltagedomain *voltdm,
 		return -ENODATA;
 	}
 
+	if (!voltdm->volt_data) {
+		pr_err("%s: No voltage data defined for vdd_%s\n",
+			__func__, voltdm->name);
+		return -ENODATA;
+	}
+
 	/* Adjust voltage to the exact voltage from the OPP table */
 	for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
 		if (voltdm->volt_data[i].volt_nominal >= target_volt) {

+ 1 - 1
arch/arm/mm/dma-mapping.c

@@ -1167,7 +1167,7 @@ static int __init dma_debug_do_init(void)
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }
-fs_initcall(dma_debug_do_init);
+core_initcall(dma_debug_do_init);
 
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 

+ 1 - 1
arch/arm/mm/proc-v7m.S

@@ -96,7 +96,7 @@ ENTRY(cpu_cm7_proc_fin)
 	ret	lr
 ENDPROC(cpu_cm7_proc_fin)
 
-	.section ".text.init", #alloc, #execinstr
+	.section ".init.text", #alloc, #execinstr
 
 __v7m_cm7_setup:
 	mov	r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)

+ 2 - 2
arch/arm64/boot/dts/marvell/armada-37xx.dtsi

@@ -105,7 +105,7 @@
 				status = "disabled";
 				status = "disabled";
 			};
 			};
 
 
-			nb_perih_clk: nb-periph-clk@13000{
+			nb_periph_clk: nb-periph-clk@13000 {
 				compatible = "marvell,armada-3700-periph-clock-nb";
 				compatible = "marvell,armada-3700-periph-clock-nb";
 				reg = <0x13000 0x100>;
 				reg = <0x13000 0x100>;
 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
@@ -113,7 +113,7 @@
 				#clock-cells = <1>;
 				#clock-cells = <1>;
 			};
 			};
 
 
-			sb_perih_clk: sb-periph-clk@18000{
+			sb_periph_clk: sb-periph-clk@18000 {
 				compatible = "marvell,armada-3700-periph-clock-sb";
 				compatible = "marvell,armada-3700-periph-clock-sb";
 				reg = <0x18000 0x100>;
 				reg = <0x18000 0x100>;
 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,

+ 3 - 3
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi

@@ -130,8 +130,8 @@
 				reg = <0x700600 0x50>;
 				#address-cells = <0x1>;
 				#size-cells = <0x0>;
-				cell-index = <1>;
-				clocks = <&cps_syscon0 0 3>;
+				cell-index = <3>;
+				clocks = <&cps_syscon0 1 21>;
 				status = "disabled";
 			};
 
@@ -140,7 +140,7 @@
 				reg = <0x700680 0x50>;
 				#address-cells = <1>;
 				#size-cells = <0>;
-				cell-index = <2>;
+				cell-index = <4>;
 				clocks = <&cps_syscon0 1 21>;
 				status = "disabled";
 			};

+ 5 - 2
arch/arm64/boot/dts/rockchip/rk3399.dtsi

@@ -300,8 +300,11 @@
 		ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
 			  0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
 		resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-		reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+			 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+			 <&cru SRST_A_PCIE>;
+		reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+			      "pm", "pclk", "aclk";
 		status = "disabled";
 
 		pcie0_intc: interrupt-controller {

+ 1 - 0
arch/arm64/include/asm/kvm_asm.h

@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 

+ 3 - 0
arch/arm64/include/asm/kvm_host.h

@@ -62,6 +62,9 @@ struct kvm_arch {
 	/* VTTBR value associated with above pgd and vmid */
 	u64    vttbr;
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* The maximum number of vCPUs depends on the used GIC model */
 	int max_vcpus;
 

+ 1 - 1
arch/arm64/include/asm/kvm_mmu.h

@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 	return v;
 }
 
-#define kern_hyp_va(v) 	(typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
 /*
  * We currently only support a 40bit IPA.
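
The outer parentheses added to kern_hyp_va() matter because a cast binds less tightly than "->": without them, the expansion of kern_hyp_va(vcpu)->kvm would apply ->kvm to the raw unsigned long before the cast back to typeof(v). A one-line illustration (this exact nested use appears in the arch/arm64/kvm/hyp/tlb.c hunk below):

	/* would not compile against the old, unparenthesised macro */
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);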

+ 9 - 1
arch/arm64/include/asm/perf_event.h

@@ -46,7 +46,15 @@
 #define	ARMV8_PMU_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
 #define	ARMV8_PMU_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */
 
-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR	0	/* Software increment event */
+/*
+ * PMUv3 event types: required events
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR				0x00
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL			0x03
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE				0x04
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED				0x10
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES				0x11
+#define ARMV8_PMUV3_PERFCTR_BR_PRED				0x12
 
 /*
  * Event filters for PMUv3

+ 1 - 9
arch/arm64/kernel/perf_event.c

@@ -31,17 +31,9 @@
 
 /*
  * ARMv8 PMUv3 Performance Events handling code.
- * Common event types.
+ * Common event types (some are defined in asm/perf_event.h).
  */
 
-/* Required events. */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR				0x00
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL			0x03
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE				0x04
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED				0x10
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES				0x11
-#define ARMV8_PMUV3_PERFCTR_BR_PRED				0x12
-
 /* At least one of the following is required. */
 #define ARMV8_PMUV3_PERFCTR_INST_RETIRED			0x08
 #define ARMV8_PMUV3_PERFCTR_INST_SPEC				0x1B

+ 15 - 0
arch/arm64/kvm/hyp/tlb.c

@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	write_sysreg(0, vttbr_el2);
 }
 
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+
+	asm volatile("tlbi vmalle1" : : );
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, vttbr_el2);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	dsb(ishst);

+ 8 - 2
arch/arm64/kvm/sys_regs.c

@@ -597,8 +597,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 
 			idx = ARMV8_PMU_CYCLE_IDX;
 		} else {
-			BUG();
+			return false;
 		}
+	} else if (r->CRn == 0 && r->CRm == 9) {
+		/* PMCCNTR */
+		if (pmu_access_event_counter_el0_disabled(vcpu))
+			return false;
+
+		idx = ARMV8_PMU_CYCLE_IDX;
 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
 		/* PMEVCNTRn_EL0 */
 		if (pmu_access_event_counter_el0_disabled(vcpu))
@@ -606,7 +612,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 	} else {
-		BUG();
+		return false;
 	}
 
 	if (!pmu_counter_idx_valid(vcpu, idx))

+ 1 - 0
arch/nios2/kernel/time.c

@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
 		ret = nios2_clocksource_init(timer);
 		break;
 	default:
+		ret = 0;
 		break;
 	}
 

+ 12 - 3
arch/powerpc/include/asm/exception-64s.h

@@ -91,7 +91,7 @@
  */
 #define LOAD_HANDLER(reg, label)					\
 	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
-	ori	reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
 
 #define __LOAD_HANDLER(reg, label)					\
 	ld	reg,PACAKBASE(r13);					\
@@ -158,14 +158,17 @@ BEGIN_FTR_SECTION_NESTED(943)						\
 	std	ra,offset(r13);						\
 END_FTR_SECTION_NESTED(ftr,ftr,943)
 
-#define EXCEPTION_PROLOG_0(area)					\
-	GET_PACA(r13);							\
+#define EXCEPTION_PROLOG_0_PACA(area)					\
 	std	r9,area+EX_R9(r13);	/* save r9 */			\
 	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);			\
 	HMT_MEDIUM;							\
 	std	r10,area+EX_R10(r13);	/* save r10 - r12 */		\
 	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
 
+#define EXCEPTION_PROLOG_0(area)					\
+	GET_PACA(r13);							\
+	EXCEPTION_PROLOG_0_PACA(area)
+
 #define __EXCEPTION_PROLOG_1(area, extra, vec)				\
 	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);		\
 	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
@@ -196,6 +199,12 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	EXCEPTION_PROLOG_1(area, extra, vec);				\
 	EXCEPTION_PROLOG_PSERIES_1(label, h);
 
+/* Have the PACA in r13 already */
+#define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec)	\
+	EXCEPTION_PROLOG_0_PACA(area);					\
+	EXCEPTION_PROLOG_1(area, extra, vec);				\
+	EXCEPTION_PROLOG_PSERIES_1(label, h);
+
 #define __KVMTEST(h, n)							\
 	lbz	r10,HSTATE_IN_GUEST(r13);				\
 	cmpwi	r10,0;							\

+ 1 - 0
arch/powerpc/include/asm/ppc-opcode.h

@@ -460,5 +460,6 @@
 
 #define PPC_SLBIA(IH)	stringify_in_c(.long PPC_INST_SLBIA | \
 				       ((IH & 0x7) << 21))
+#define PPC_INVALIDATE_ERAT	PPC_SLBIA(7)
 
 #endif /* _ASM_POWERPC_PPC_OPCODE_H */

+ 8 - 3
arch/powerpc/kernel/exceptions-64s.S

@@ -116,7 +116,9 @@ EXC_VIRT_NONE(0x4000, 0x4100)
 
 EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
 	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+	GET_PACA(r13)
+	clrrdi	r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */
+	EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD,
 				 IDLETEST, 0x100)
 
 EXC_REAL_END(system_reset, 0x100, 0x200)
@@ -124,6 +126,9 @@ EXC_VIRT_NONE(0x4100, 0x4200)
 
 #ifdef CONFIG_PPC_P7_NAP
 EXC_COMMON_BEGIN(system_reset_idle_common)
+BEGIN_FTR_SECTION
+	GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	bl	pnv_restore_hyp_resource
 
 	li	r0,PNV_THREAD_RUNNING
@@ -169,7 +174,7 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
 	SET_SCRATCH0(r13)		/* save r13 */
 	/*
 	 * Running native on arch 2.06 or later, we may wakeup from winkle
-	 * inside machine check. If yes, then last bit of HSPGR0 would be set
+	 * inside machine check. If yes, then last bit of HSPRG0 would be set
 	 * to 1. Hence clear it unconditionally.
 	 */
 	GET_PACA(r13)
@@ -388,7 +393,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
 	/*
 	 * Go back to winkle. Please note that this thread was woken up in
 	 * machine check from winkle and have not restored the per-subcore
-	 * state. Hence before going back to winkle, set last bit of HSPGR0
+	 * state. Hence before going back to winkle, set last bit of HSPRG0
 	 * to 1. This will make sure that if this thread gets woken up
 	 * again at reset vector 0x100 then it will get chance to restore
 	 * the subcore state.

+ 21 - 21
arch/powerpc/kernel/process.c

@@ -1215,7 +1215,7 @@ static void show_instructions(struct pt_regs *regs)
 		int instr;
 
 		if (!(i % 8))
-			printk("\n");
+			pr_cont("\n");
 
 #if !defined(CONFIG_BOOKE)
 		/* If executing with the IMMU off, adjust pc rather
@@ -1227,18 +1227,18 @@
 
 		if (!__kernel_text_address(pc) ||
 		     probe_kernel_address((unsigned int __user *)pc, instr)) {
-			printk(KERN_CONT "XXXXXXXX ");
+			pr_cont("XXXXXXXX ");
 		} else {
 			if (regs->nip == pc)
-				printk(KERN_CONT "<%08x> ", instr);
+				pr_cont("<%08x> ", instr);
 			else
-				printk(KERN_CONT "%08x ", instr);
+				pr_cont("%08x ", instr);
 		}
 
 		pc += sizeof(int);
 	}
 
-	printk("\n");
+	pr_cont("\n");
 }
 
 struct regbit {
@@ -1282,7 +1282,7 @@ static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
 
 	for (; bits->bit; ++bits)
 		if (val & bits->bit) {
-			printk("%s%s", s, bits->name);
+			pr_cont("%s%s", s, bits->name);
 			s = sep;
 		}
 }
@@ -1305,9 +1305,9 @@ static void print_tm_bits(unsigned long val)
  *   T: Transactional	(bit 34)
  */
 	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
-		printk(",TM[");
+		pr_cont(",TM[");
 		print_bits(val, msr_tm_bits, "");
-		printk("]");
+		pr_cont("]");
 	}
 }
 #else
@@ -1316,10 +1316,10 @@ static void print_tm_bits(unsigned long val) {}
 
 static void print_msr_bits(unsigned long val)
 {
-	printk("<");
+	pr_cont("<");
 	print_bits(val, msr_bits, ",");
 	print_tm_bits(val);
-	printk(">");
+	pr_cont(">");
 }
 
 #ifdef CONFIG_PPC64
@@ -1347,29 +1347,29 @@ void show_regs(struct pt_regs * regs)
 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
 	trap = TRAP(regs);
 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
-		printk("CFAR: "REG" ", regs->orig_gpr3);
+		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
 	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
+		pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
 #else
-		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
+		pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
 #endif
 #ifdef CONFIG_PPC64
-	printk("SOFTE: %ld ", regs->softe);
+	pr_cont("SOFTE: %ld ", regs->softe);
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (MSR_TM_ACTIVE(regs->msr))
-		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
+		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
 #endif
 
 	for (i = 0;  i < 32;  i++) {
 		if ((i % REGS_PER_LINE) == 0)
-			printk("\nGPR%02d: ", i);
-		printk(REG " ", regs->gpr[i]);
+			pr_cont("\nGPR%02d: ", i);
+		pr_cont(REG " ", regs->gpr[i]);
 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
 			break;
 	}
-	printk("\n");
+	pr_cont("\n");
 #ifdef CONFIG_KALLSYMS
 	/*
 	 * Lookup NIP late so we have the best change of getting the
@@ -1900,14 +1900,14 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 			if ((ip == rth) && curr_frame >= 0) {
-				printk(" (%pS)",
+				pr_cont(" (%pS)",
 				       (void *)current->ret_stack[curr_frame].ret);
 				curr_frame--;
 			}
 #endif
 			if (firstframe)
-				printk(" (unreliable)");
-			printk("\n");
+				pr_cont(" (unreliable)");
+			pr_cont("\n");
 		}
 		firstframe = 0;
 

+ 14 - 6
arch/powerpc/kernel/setup_64.c

@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
 		if (firmware_has_feature(FW_FEATURE_OPAL))
 			opal_configure_cores();
 
-		/* Enable AIL if supported, and we are in hypervisor mode */
-		if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
-		    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
-			unsigned long lpcr = mfspr(SPRN_LPCR);
-			mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
-		}
+		/* AIL on native is done in cpu_ready_for_interrupts() */
 	}
 }
 
 static void cpu_ready_for_interrupts(void)
 {
+	/*
+	 * Enable AIL if supported, and we are in hypervisor mode. This
+	 * is called once for every processor.
+	 *
+	 * If we are not in hypervisor mode the job is done once for
+	 * the whole partition in configure_exceptions().
+	 */
+	if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+	    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		unsigned long lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+	}
+
 	/* Set IR and DR in PACA MSR */
 	get_paca()->kernel_msr = MSR_KERNEL;
 }

+ 4 - 0
arch/powerpc/mm/hash_utils_64.c

@@ -1029,6 +1029,10 @@ void hash__early_init_mmu_secondary(void)
 {
 	/* Initialize hash table for that CPU */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			update_hid_for_hash();
+
 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
 			mtspr(SPRN_SDR1, _SDR1);
 		else

+ 4 - 0
arch/powerpc/mm/pgtable-radix.c

@@ -388,6 +388,10 @@ void radix__early_init_mmu_secondary(void)
 	 * update partition table control register and UPRT
 	 */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			update_hid_for_radix();
+
 		lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 

+ 4 - 0
arch/powerpc/mm/tlb-radix.c

@@ -50,6 +50,8 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 		__tlbiel_pid(pid, set, ric);
 	}
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 	return;
 }
 
@@ -83,6 +85,8 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,

+ 2 - 0
arch/s390/kernel/vmlinux.lds.S

@@ -62,9 +62,11 @@ SECTIONS
 
 	. = ALIGN(PAGE_SIZE);
 	__start_ro_after_init = .;
+	__start_data_ro_after_init = .;
 	.data..ro_after_init : {
 		 *(.data..ro_after_init)
 	}
+	__end_data_ro_after_init = .;
 	EXCEPTION_TABLE(16)
 	. = ALIGN(PAGE_SIZE);
 	__end_ro_after_init = .;

+ 1 - 1
arch/s390/pci/pci_dma.c

@@ -423,7 +423,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t dma_addr_base, dma_addr;
 	int flags = ZPCI_PTE_VALID;
 	struct scatterlist *s;
-	unsigned long pa;
+	unsigned long pa = 0;
 	int ret;
 
 	size = PAGE_ALIGN(size);

+ 23 - 0
arch/sparc/Kconfig

@@ -43,6 +43,7 @@ config SPARC
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
 	select HAVE_ARCH_HARDENED_USERCOPY
+	select PROVE_LOCKING_SMALL if PROVE_LOCKING
 
 config SPARC32
 	def_bool !64BIT
@@ -89,6 +90,14 @@ config ARCH_DEFCONFIG
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
 
+config ARCH_ATU
+	bool
+	default y if SPARC64
+
+config ARCH_DMA_ADDR_T_64BIT
+	bool
+	default y if ARCH_ATU
+
 config IOMMU_HELPER
 	bool
 	default y if SPARC64
@@ -304,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y if SPARC64
 
+config FORCE_MAX_ZONEORDER
+	int "Maximum zone order"
+	default "13"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages.  This option selects the largest power of two that the kernel
+	  keeps in the memory allocator.  If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one. For example,
+	  a value of 13 means that the largest free memory block is 2^12 pages.
+
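
As a quick worked check of the "maximum order plus one" convention above: the default of 13 makes the largest free block 2^12 = 4096 pages, which (assuming SPARC64's usual 8 KB base page, an assumption not stated in this hunk) is 32 MB of physically contiguous memory.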
 source "mm/Kconfig"
 
 if SPARC64

+ 343 - 0
arch/sparc/include/asm/hypervisor.h

@@ -2335,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
  */
 #define HV_FAST_PCI_MSG_SETVALID	0xd3
 
+/* PCI IOMMU v2 definitions and services
+ *
+ * While the PCI IO definitions above are still valid, IOMMU v2 adds new PCI IO
+ * definitions and services.
+ *
+ *	CTE		Clump Table Entry. First level table entry in the ATU.
+ *
+ *	pci_device_list
+ *			A 32-bit aligned list of pci_devices.
+ *
+ *	pci_device_listp
+ *			real address of a pci_device_list. 32-bit aligned.
+ *
+ *	iotte		IOMMU translation table entry.
+ *
+ *	iotte_attributes
+ *			IO Attributes for IOMMU v2 mappings. In addition to
+ *			read, write IOMMU v2 supports relax ordering
+ *
+ *	io_page_list	A 64-bit aligned list of real addresses. Each real
+ *			address in an io_page_list must be properly aligned
+ *			to the pagesize of the given IOTSB.
+ *
+ *	io_page_list_p	Real address of an io_page_list, 64-bit aligned.
+ *
+ *	IOTSB		IO Translation Storage Buffer. An aligned table of
+ *			IOTTEs. Each IOTSB has a pagesize, table size, and
+ *			virtual address associated with it that must match
+ *			a pagesize and table size supported by the underlying
+ *			hardware implementation. The alignment requirements
+ *			for an IOTSB depend on the pagesize used for that IOTSB.
+ *			Each IOTTE in an IOTSB maps one pagesize-sized page.
+ *			The size of the IOTSB dictates how large of a virtual
+ *			address space the IOTSB is capable of mapping.
+ *
+ *	iotsb_handle	An opaque identifier for an IOTSB. A devhandle plus
+ *			iotsb_handle represents a binding of an IOTSB to a
+ *			PCI root complex.
+ *
+ *	iotsb_index	Zero-based IOTTE number within an IOTSB.
+ */
+
+/* The index_count argument consists of two fields:
+ * bits 63:48 #iottes and bits 47:0 iotsb_index
+ */
+#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
+	(((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
+
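
For instance, a request covering 8 IOTTEs starting at IOTSB index 0x40 packs as (illustrative values only):

	/* (8UL << 48) | 0x40: #iottes in bits 63:48, iotsb_index in bits 47:0 */
	u64 index_count = HV_PCI_IOTSB_INDEX_COUNT(8, 0x40);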
+/* pci_iotsb_conf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_CONF
+ * ARG0:	devhandle
+ * ARG1:	r_addr
+ * ARG2:	size
+ * ARG3:	pagesize
+ * ARG4:	iova
+ * RET0:	status
+ * RET1:	iotsb_handle
+ * ERRORS:	EINVAL		Invalid devhandle, size, iova, or pagesize
+ *		EBADALIGN	r_addr is not properly aligned
+ *		ENORADDR	r_addr is not a valid real address
+ *		ETOOMANY	No further IOTSBs may be configured
+ *		EBUSY		Duplicate devhandle, r_addr, iova combination
+ *
+ * Create an IOTSB suitable for the PCI root complex identified by devhandle,
+ * for the DMA virtual address defined by the argument iova.
+ *
+ * r_addr is the properly aligned base address of the IOTSB and size is the
+ * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to
+ * being configured. If it contains any values other than zeros then the
+ * behavior is undefined.
+ *
+ * pagesize is the size of each page in the IOTSB. Note that the combination of
+ * size (table size) and pagesize must be valid.
+ *
+ * virt is the DMA virtual address this IOTSB will map.
+ *
+ * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1.
+ * Once configured, privileged access to the IOTSB memory is prohibited and
+ * creates undefined behavior. The only permitted access is indirect via these
+ * services.
+ */
+#define HV_FAST_PCI_IOTSB_CONF		0x190
+
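
A minimal allocate-and-configure sketch of this call (the pci_sun4v_iotsb_conf() wrapper name and argument order are assumed for illustration; only the ARG/RET contract above is from the source):

	/* one 64-bit IOTTE per IO page covered; must be zeroed before the call */
	u64 table_size = (dvma_size / IO_PAGE_SIZE) * 8;
	void *table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(table_size));
	unsigned long err = pci_sun4v_iotsb_conf(devhandle, __pa(table),
						 table_size, IO_PAGE_SIZE,
						 dvma_base, &iotsb_handle);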
+/* pci_iotsb_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_INFO
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * RET0:	status
+ * RET1:	r_addr
+ * RET2:	size
+ * RET3:	pagesize
+ * RET4:	iova
+ * RET5:	#bound
+ * ERRORS:	EINVAL	Invalid devhandle or iotsb_handle
+ *
+ * This service returns configuration information about an IOTSB previously
+ * created with pci_iotsb_conf.
+ *
+ * iotsb_handle value 0 may be used with this service to inquire about the
+ * legacy IOTSB that may or may not exist. If the service succeeds, the return
+ * values describe the legacy IOTSB and I/O virtual addresses mapped by that
+ * table. However, the table base address r_addr may contain the value -1 which
+ * indicates a memory range that cannot be accessed or be reclaimed.
+ *
+ * The return value #bound contains the number of PCI devices that iotsb_handle
+ * is currently bound to.
+ */
+#define HV_FAST_PCI_IOTSB_INFO		0x191
+
+/* pci_iotsb_unconf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_UNCONF
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * RET0:	status
+ * ERRORS:	EINVAL	Invalid devhandle or iotsb_handle
+ *		EBUSY	The IOTSB is bound and may not be unconfigured
+ *
+ * This service unconfigures the IOTSB identified by the devhandle and
+ * iotsb_handle arguments, previously created with pci_iotsb_conf.
+ * The IOTSB must not be currently bound to any device or the service will fail
+ *
+ * If the call succeeds, iotsb_handle is no longer valid.
+ */
+#define HV_FAST_PCI_IOTSB_UNCONF	0x192
+
+/* pci_iotsb_bind()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_BIND
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	pci_device
+ * RET0:	status
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or pci_device
+ *		EBUSY	A PCI function is already bound to an IOTSB at the same
+ *			address range as specified by devhandle, iotsb_handle.
+ *
+ * This service binds the PCI function specified by the argument pci_device to
+ * the IOTSB specified by the arguments devhandle and iotsb_handle.
+ *
+ * The PCI device function is bound to the specified IOTSB with the IOVA range
+ * specified when the IOTSB was configured via pci_iotsb_conf. If the function
+ * is already bound then it is unbound first.
+ */
+#define HV_FAST_PCI_IOTSB_BIND		0x193
+
+/* pci_iotsb_unbind()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_UNBIND
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	pci_device
+ * RET0:	status
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or pci_device
+ *		ENOMAP	The PCI function was not bound to the specified IOTSB
+ *
+ * This service unbinds the PCI device specified by the argument pci_device
+ * from the IOTSB identified by the arguments devhandle and iotsb_handle.
+ *
+ * If the PCI device is not bound to the specified IOTSB then this service will
+ * fail with status ENOMAP
+ */
+#define HV_FAST_PCI_IOTSB_UNBIND	0x194
+
+/* pci_iotsb_get_binding()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_GET_BINDING
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iova
+ * RET0:	status
+ * RET1:	iotsb_handle
+ * ERRORS:	EINVAL	Invalid devhandle, pci_device, or iova
+ *		ENOMAP	The PCI function is not bound to an IOTSB at iova
+ *
+ * This service returns the IOTSB binding, iotsb_handle, for a given pci_device
+ * and DMA virtual address, iova.
+ *
+ * iova must be the base address of a DMA virtual address range as defined by
+ * the iommu-address-ranges property in the root complex device node defined
+ * by the argument devhandle.
+ */
+#define HV_FAST_PCI_IOTSB_GET_BINDING	0x195
+
+/* pci_iotsb_map()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_MAP
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	index_count
+ * ARG3:	iotte_attributes
+ * ARG4:	io_page_list_p
+ * RET0:	status
+ * RET1:	#mapped
+ * ERRORS:	EINVAL		Invalid devhandle, iotsb_handle, #iottes,
+ *				iotsb_index or iotte_attributes
+ *		EBADALIGN	Improperly aligned io_page_list_p or I/O page
+ *				address in the I/O page list.
+ *		ENORADDR	Invalid io_page_list_p or I/O page address in
+ *				the I/O page list.
+ *
+ * This service creates and flushes mappings in the IOTSB defined by the
+ * arguments devhandle, iotsb.
+ *
+ * The index_count argument consists of two fields. Bits 63:48 contain #iotte
+ * and bits 47:0 contain iotsb_index
+ *
+ * The first mapping is created in the IOTSB index specified by iotsb_index.
+ * Subsequent mappings are  created at iotsb_index+1 and so on.
+ *
+ * The attributes of each mapping are defined by the argument iotte_attributes.
+ *
+ * The io_page_list_p specifies the real address of the 64-bit-aligned list of
+ * #iottes I/O page addresses. Each page address must be a properly aligned
+ * real address of a page to be mapped in the IOTSB. The first entry in the I/O
+ * page list contains the real address of the first page, the 2nd entry for the
+ * 2nd page, and so on.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The return value #mapped is the actual number of mappings created, which may
+ * be less than or equal to the argument #iottes. If the function returns
+ * successfully with a #mapped value less than the requested #iottes then the
+ * caller should continue to invoke the service with updated iotsb_index,
+ * #iottes, and io_page_list_p arguments until all pages are mapped.
+ *
+ * This service must not be used to demap a mapping. In other words, all
+ * mappings must be valid and have  one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP		0x196
+
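
Because #mapped may legitimately come back short, callers must loop; a sketch of the retry pattern (pci_sun4v_iotsb_map() is the wrapper this patch calls from pci_sun4v.c):

	while (npages != 0) {
		index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
		ret = pci_sun4v_iotsb_map(devhandle, iotsb_num, index_count,
					  prot, __pa(pglist), &num);
		if (ret != HV_EOK)
			return -1;
		entry += num;	/* advance past what did get mapped */
		npages -= num;
		pglist += num;
	}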
+/* pci_iotsb_map_one()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_MAP_ONE
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * ARG3:	iotte_attributes
+ * ARG4:	r_addr
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle, iotsb_handle, iotsb_index
+ *				or iotte_attributes
+ *		EBADALIGN	Improperly aligned r_addr
+ *		ENORADDR	Invalid r_addr
+ *
+ * This service creates and flushes a single mapping in the IOTSB defined by the
+ * arguments devhandle, iotsb.
+ *
+ * The mapping for the page at r_addr is created at the IOTSB index specified by
+ * iotsb_index with  the attributes iotte_attributes.
+ *
+ * This service must not be used to demap a mapping. In other words, the mapping
+ * must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP_ONE	0x197
+
+/* pci_iotsb_demap()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_DEMAP
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * ARG3:	#iottes
+ * RET0:	status
+ * RET1:	#unmapped
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, iotsb_index or #iottes
+ *
+ * This service unmaps and flushes up to #iottes mappings starting at index
+ * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs unmapped is returned in #unmapped and may be less
+ * than or equal to the requested number of IOTTEs, #iottes.
+ *
+ * If #unmapped is less than #iottes, the caller should continue to invoke this
+ * service with updated iotsb_index and #iottes arguments until all pages are
+ * demapped.
+ */
+#define HV_FAST_PCI_IOTSB_DEMAP		0x198
+
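
Teardown follows the same loop-until-done contract (pci_sun4v_iotsb_demap() is the wrapper dma_4v_iommu_demap() uses later in this patch):

	do {
		ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
					    entry, npages, &num);
		if (ret != HV_EOK)
			break;	/* hypervisor rejected the request */
		entry += num;
		npages -= num;
	} while (npages != 0);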
+/* pci_iotsb_getmap()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_GETMAP
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * RET0:	status
+ * RET1:	r_addr
+ * RET2:	iotte_attributes
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or iotsb_index
+ *		ENOMAP	No mapping was found
+ *
+ * This service returns the mapping specified by index iotsb_index from the
+ * IOTSB defined by the arguments devhandle, iotsb.
+ *
+ * Upon success, the real address of the mapping shall be returned in
+ * r_addr and the IOTTE mapping attributes shall be returned in
+ * iotte_attributes.
+ *
+ * The return value iotte_attributes may not include optional features used in
+ * the call to create the  mapping.
+ */
+#define HV_FAST_PCI_IOTSB_GETMAP	0x199
+
+/* pci_iotsb_sync_mappings()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_SYNC_MAPPINGS
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * ARG3:	#iottes
+ * RET0:	status
+ * RET1:	#synced
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, iotsb_index, or #iottes
+ *
+ * This service synchronizes #iottes mappings starting at index iotsb_index in
+ * the IOTSB defined by the arguments devhandle, iotsb.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs synchronized is returned in #synced, which may
+ * be less than or equal to the requested number, #iottes.
+ *
+ * If, upon a successful return, #synced is less than #iottes, the caller should
+ * continue to invoke this service with updated iotsb_index and #iottes
+ * arguments until all pages are synchronized.
+ */
+#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS	0x19a
+
 /* Logical Domain Channel services.  */
 
 #define LDC_CHANNEL_DOWN		0
@@ -2993,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
 #define HV_GRP_SDIO			0x0108
 #define HV_GRP_SDIO_ERR			0x0109
 #define HV_GRP_REBOOT_DATA		0x0110
+#define HV_GRP_ATU			0x0111
 #define HV_GRP_M7_PERF			0x0114
 #define HV_GRP_NIAG_PERF		0x0200
 #define HV_GRP_FIRE_PERF		0x0201

+ 28 - 0
arch/sparc/include/asm/iommu_64.h

@@ -24,8 +24,36 @@ struct iommu_arena {
 	unsigned int	limit;
 };
 
+#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */
+
+/* Data structures for SPARC ATU architecture */
+struct atu_iotsb {
+	void	*table;		/* IOTSB table base virtual addr*/
+	u64	ra;		/* IOTSB table real addr */
+	u64	dvma_size;	/* ranges[3].size or OS selected 32G size */
+	u64	dvma_base;	/* ranges[3].base */
+	u64	table_size;	/* IOTSB table size */
+	u64	page_size;	/* IO PAGE size for IOTSB */
+	u32	iotsb_num;	/* tsbnum is same as iotsb_handle */
+};
+
+struct atu_ranges {
+	u64	base;
+	u64	size;
+};
+
+struct atu {
+	struct	atu_ranges	*ranges;
+	struct	atu_iotsb	*iotsb;
+	struct	iommu_map_table	tbl;
+	u64			base;
+	u64			size;
+	u64			dma_addr_mask;
+};
+
 struct iommu {
 	struct iommu_map_table	tbl;
+	struct atu		*atu;
 	spinlock_t		lock;
 	u32			dma_addr_mask;
 	iopte_t			*page_table;

+ 1 - 0
arch/sparc/kernel/hvapi.c

@@ -39,6 +39,7 @@ static struct api_info api_table[] = {
 	{ .group = HV_GRP_SDIO,					},
 	{ .group = HV_GRP_SDIO_ERR,				},
 	{ .group = HV_GRP_REBOOT_DATA,				},
+	{ .group = HV_GRP_ATU,		.flags = FLAG_PRE_API	},
 	{ .group = HV_GRP_NIAG_PERF,	.flags = FLAG_PRE_API	},
 	{ .group = HV_GRP_FIRE_PERF,				},
 	{ .group = HV_GRP_N2_CPU,				},

+ 6 - 2
arch/sparc/kernel/iommu.c

@@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask)
 	struct iommu *iommu = dev->archdata.iommu;
 	u64 dma_addr_mask = iommu->dma_addr_mask;
 
-	if (device_mask >= (1UL << 32UL))
-		return 0;
+	if (device_mask > DMA_BIT_MASK(32)) {
+		if (iommu->atu)
+			dma_addr_mask = iommu->atu->dma_addr_mask;
+		else
+			return 0;
+	}
 
 	if ((device_mask & dma_addr_mask) == dma_addr_mask)
 		return 1;

+ 0 - 1
arch/sparc/kernel/iommu_common.h

@@ -13,7 +13,6 @@
 #include <linux/scatterlist.h>
 #include <linux/device.h>
 #include <linux/iommu-helper.h>
-#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 

+ 360 - 58
arch/sparc/kernel/pci_sun4v.c

@@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = {
 	{ .major = 1, .minor = 1 },
 };
 
+static unsigned long vatu_major = 1;
+static unsigned long vatu_minor = 1;
+
 #define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
 
 struct iommu_batch {
@@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
 }
 
 /* Interrupts must be disabled.  */
-static long iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
 	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
+	u64 *pglist = p->pglist;
+	u64 index_count;
 	unsigned long devhandle = pbm->devhandle;
 	unsigned long prot = p->prot;
 	unsigned long entry = p->entry;
-	u64 *pglist = p->pglist;
 	unsigned long npages = p->npages;
+	unsigned long iotsb_num;
+	unsigned long ret;
+	long num;
 
 	/* VPCI maj=1, min=[0,1] only supports read and write */
 	if (vpci_major < 2)
 		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
 	while (npages != 0) {
-		long num;
-
-		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
-					  npages, prot, __pa(pglist));
-		if (unlikely(num < 0)) {
-			if (printk_ratelimit())
-				printk("iommu_batch_flush: IOMMU map of "
-				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
-				       "status %ld\n",
-				       devhandle, HV_PCI_TSBID(0, entry),
-				       npages, prot, __pa(pglist), num);
-			return -1;
+		if (mask <= DMA_BIT_MASK(32)) {
+			num = pci_sun4v_iommu_map(devhandle,
+						  HV_PCI_TSBID(0, entry),
+						  npages,
+						  prot,
+						  __pa(pglist));
+			if (unlikely(num < 0)) {
+				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
+						   __func__,
+						   devhandle,
+						   HV_PCI_TSBID(0, entry),
+						   npages, prot, __pa(pglist),
+						   num);
+				return -1;
+			}
+		} else {
+			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
+			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
+			ret = pci_sun4v_iotsb_map(devhandle,
+						  iotsb_num,
+						  index_count,
+						  prot,
+						  __pa(pglist),
+						  &num);
+			if (unlikely(ret != HV_EOK)) {
+				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
+						   __func__,
+						   devhandle, iotsb_num,
+						   index_count, prot,
+						   __pa(pglist), ret);
+				return -1;
+			}
 		}
-
 		entry += num;
 		npages -= num;
 		pglist += num;
@@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p)
 	return 0;
 }
 
-static inline void iommu_batch_new_entry(unsigned long entry)
+static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
 {
 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
 	if (p->entry + p->npages == entry)
 		return;
 	if (p->entry != ~0UL)
-		iommu_batch_flush(p);
+		iommu_batch_flush(p, mask);
 	p->entry = entry;
 }
 
 /* Interrupts must be disabled.  */
-static inline long iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page, u64 mask)
 {
 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
@@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page)
 
 	p->pglist[p->npages++] = phys_page;
 	if (p->npages == PGLIST_NENTS)
-		return iommu_batch_flush(p);
+		return iommu_batch_flush(p, mask);
 
 	return 0;
 }
 
 /* Interrupts must be disabled.  */
-static inline long iommu_batch_end(void)
+static inline long iommu_batch_end(u64 mask)
 {
 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
 	BUG_ON(p->npages >= PGLIST_NENTS);
 
-	return iommu_batch_flush(p);
+	return iommu_batch_flush(p, mask);
 }
 
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 				   dma_addr_t *dma_addrp, gfp_t gfp,
 				   unsigned long attrs)
 {
+	u64 mask;
 	unsigned long flags, order, first_page, npages, n;
 	unsigned long prot = 0;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
 	struct page *page;
 	void *ret;
 	long entry;
@@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
 	iommu = dev->archdata.iommu;
+	atu = iommu->atu;
+
+	mask = dev->coherent_dma_mask;
+	if (mask <= DMA_BIT_MASK(32))
+		tbl = &iommu->tbl;
+	else
+		tbl = &atu->tbl;
 
-	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto range_alloc_fail;
 
-	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	first_page = __pa(first_page);
 
@@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 			  entry);
 
 	for (n = 0; n < npages; n++) {
-		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
 
-	if (unlikely(iommu_batch_end() < 0L))
+	if (unlikely(iommu_batch_end(mask) < 0L))
 		goto iommu_map_fail;
 
 	local_irq_restore(flags);
@@ -206,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
+	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
 
 range_alloc_fail:
 	free_pages(first_page, order);
 	return NULL;
 }
 
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
-			       unsigned long npages)
+unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
+				unsigned long iotsb_num,
+				struct pci_bus *bus_dev)
+{
+	struct pci_dev *pdev;
+	unsigned long err;
+	unsigned int bus;
+	unsigned int device;
+	unsigned int fun;
+
+	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
+		if (pdev->subordinate) {
+			/* No need to bind pci bridge */
+			dma_4v_iotsb_bind(devhandle, iotsb_num,
+					  pdev->subordinate);
+		} else {
+			bus = bus_dev->number;
+			device = PCI_SLOT(pdev->devfn);
+			fun = PCI_FUNC(pdev->devfn);
+			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
+						   HV_PCI_DEVICE_BUILD(bus,
+								       device,
+								       fun));
+
+			/* If bind fails for one device it is going to fail
+			 * for rest of the devices because we are sharing
+			 * IOTSB. So in case of failure simply return with
+			 * error.
+			 */
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
+			       dma_addr_t dvma, unsigned long iotsb_num,
+			       unsigned long entry, unsigned long npages)
 {
-	u32 devhandle = *(u32 *)demap_arg;
 	unsigned long num, flags;
+	unsigned long ret;
 
 	local_irq_save(flags);
 	do {
-		num = pci_sun4v_iommu_demap(devhandle,
-					    HV_PCI_TSBID(0, entry),
-					    npages);
-
+		if (dvma <= DMA_BIT_MASK(32)) {
+			num = pci_sun4v_iommu_demap(devhandle,
+						    HV_PCI_TSBID(0, entry),
+						    npages);
+		} else {
+			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
+						    entry, npages, &num);
+			if (unlikely(ret != HV_EOK)) {
+				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
+						   ret);
+			}
+		}
 		entry += num;
 		npages -= num;
 	} while (npages != 0);
@@ -236,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
 	unsigned long order, npages, entry;
+	unsigned long iotsb_num;
 	u32 devhandle;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
+	atu = iommu->atu;
 	devhandle = pbm->devhandle;
-	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
-	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
+
+	if (dvma <= DMA_BIT_MASK(32)) {
+		tbl = &iommu->tbl;
+		iotsb_num = 0; /* we don't care for legacy iommu */
+	} else {
+		tbl = &atu->tbl;
+		iotsb_num = atu->iotsb->iotsb_num;
+	}
+	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
+	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
+	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
 	order = get_order(size);
 	if (order < 10)
 		free_pages((unsigned long)cpu, order);
@@ -257,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 				  unsigned long attrs)
 {
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
+	u64 mask;
 	unsigned long flags, npages, oaddr;
 	unsigned long i, base_paddr;
-	u32 bus_addr, ret;
 	unsigned long prot;
+	dma_addr_t bus_addr, ret;
 	long entry;
 
 	iommu = dev->archdata.iommu;
+	atu = iommu->atu;
 
 	if (unlikely(direction == DMA_NONE))
 		goto bad;
@@ -272,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
-	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+	mask = *dev->dma_mask;
+	if (mask <= DMA_BIT_MASK(32))
+		tbl = &iommu->tbl;
+	else
+		tbl = &atu->tbl;
+
+	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto bad;
 
-	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -293,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	iommu_batch_start(dev, prot, entry);
 
 	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
-		long err = iommu_batch_add(base_paddr);
+		long err = iommu_batch_add(base_paddr, mask);
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
-	if (unlikely(iommu_batch_end() < 0L))
+	if (unlikely(iommu_batch_end(mask) < 0L))
 		goto iommu_map_fail;
 
 	local_irq_restore(flags);
@@ -310,7 +414,7 @@ bad:
 	return DMA_ERROR_CODE;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 	return DMA_ERROR_CODE;
 }
 
@@ -320,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
 	unsigned long npages;
+	unsigned long iotsb_num;
 	long entry;
 	u32 devhandle;
 
@@ -332,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
+	atu = iommu->atu;
 	devhandle = pbm->devhandle;
 
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	bus_addr &= IO_PAGE_MASK;
-	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
-	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+
+	if (bus_addr <= DMA_BIT_MASK(32)) {
+		iotsb_num = 0; /* we don't care for legacy iommu */
+		tbl = &iommu->tbl;
+	} else {
+		iotsb_num = atu->iotsb->iotsb_num;
+		tbl = &atu->tbl;
+	}
+	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
+	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
+	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -353,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
+	u64 mask;
 	unsigned long base_shift;
 	long err;
 
 	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
+	atu = iommu->atu;
+
 	if (nelems == 0 || !iommu)
 		return 0;
 	
@@ -384,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	max_seg_size = dma_get_max_seg_size(dev);
 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
+
+	mask = *dev->dma_mask;
+	if (mask <= DMA_BIT_MASK(32))
+		tbl = &iommu->tbl;
+	else
+		tbl = &atu->tbl;
+
+	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
+
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -397,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+		entry = iommu_tbl_range_alloc(dev, tbl, npages,
 					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
 		if (unlikely(entry == IOMMU_ERROR_CODE)) {
-			if (printk_ratelimit())
-				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
-				       " npages %lx\n", iommu, paddr, npages);
+			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
+					   tbl, paddr, npages);
 			goto iommu_map_failed;
 		}
 
-		iommu_batch_new_entry(entry);
+		iommu_batch_new_entry(entry, mask);
 
 		/* Convert entry to a dma_addr_t */
-		dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
+		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
 		/* Insert into HW table */
 		paddr &= IO_PAGE_MASK;
 		while (npages--) {
-			err = iommu_batch_add(paddr);
+			err = iommu_batch_add(paddr, mask);
 			if (unlikely(err < 0L))
 				goto iommu_map_failed;
 			paddr += IO_PAGE_SIZE;
@@ -452,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		dma_next = dma_addr + slen;
 	}
 
-	err = iommu_batch_end();
+	err = iommu_batch_end(mask);
 
 	if (unlikely(err < 0L))
 		goto iommu_map_failed;
@@ -475,7 +603,7 @@ iommu_map_failed:
 			vaddr = s->dma_address & IO_PAGE_MASK;
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
-			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+			iommu_tbl_range_free(tbl, vaddr, npages,
 					     IOMMU_ERROR_CODE);
 			/* XXX demap? XXX */
 			s->dma_address = DMA_ERROR_CODE;
@@ -496,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct pci_pbm_info *pbm;
 	struct scatterlist *sg;
 	struct iommu *iommu;
+	struct atu *atu;
 	unsigned long flags, entry;
+	unsigned long iotsb_num;
 	u32 devhandle;
 
 	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
+	atu = iommu->atu;
 	devhandle = pbm->devhandle;
 
 	local_irq_save(flags);
@@ -512,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		dma_addr_t dma_handle = sg->dma_address;
 		unsigned int len = sg->dma_length;
 		unsigned long npages;
-		struct iommu_map_table *tbl = &iommu->tbl;
+		struct iommu_map_table *tbl;
 		unsigned long shift = IO_PAGE_SHIFT;
 
 		if (!len)
 			break;
 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
+
+		if (dma_handle <= DMA_BIT_MASK(32)) {
+			iotsb_num = 0; /* we don't care for legacy iommu */
+			tbl = &iommu->tbl;
+		} else {
+			iotsb_num = atu->iotsb->iotsb_num;
+			tbl = &atu->tbl;
+		}
 		entry = ((dma_handle - tbl->table_map_base) >> shift);
-		dma_4v_iommu_demap(&devhandle, entry, npages);
-		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
+				   entry, npages);
+		iommu_tbl_range_free(tbl, dma_handle, npages,
 				     IOMMU_ERROR_CODE);
 		sg = sg_next(sg);
 	}
@@ -581,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 	return cnt;
 }
 
+static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
+{
+	struct atu *atu = pbm->iommu->atu;
+	struct atu_iotsb *iotsb;
+	void *table;
+	u64 table_size;
+	u64 iotsb_num;
+	unsigned long order;
+	unsigned long err;
+
+	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
+	if (!iotsb) {
+		err = -ENOMEM;
+		goto out_err;
+	}
+	atu->iotsb = iotsb;
+
+	/* calculate size of IOTSB */
+	table_size = (atu->size / IO_PAGE_SIZE) * 8;
+	order = get_order(table_size);
+	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!table) {
+		err = -ENOMEM;
+		goto table_failed;
+	}
+	iotsb->table = table;
+	iotsb->ra = __pa(table);
+	iotsb->dvma_size = atu->size;
+	iotsb->dvma_base = atu->base;
+	iotsb->table_size = table_size;
+	iotsb->page_size = IO_PAGE_SIZE;
+
+	/* configure and register IOTSB with HV */
+	err = pci_sun4v_iotsb_conf(pbm->devhandle,
+				   iotsb->ra,
+				   iotsb->table_size,
+				   iotsb->page_size,
+				   iotsb->dvma_base,
+				   &iotsb_num);
+	if (err) {
+		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
+		goto iotsb_conf_failed;
+	}
+	iotsb->iotsb_num = iotsb_num;
+
+	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
+	if (err) {
+		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
+		goto iotsb_conf_failed;
+	}
+
+	return 0;
+
+iotsb_conf_failed:
+	free_pages((unsigned long)table, order);
+table_failed:
+	kfree(iotsb);
+out_err:
+	return err;
+}
+
+static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
+{
+	struct atu *atu = pbm->iommu->atu;
+	unsigned long err;
+	const u64 *ranges;
+	u64 map_size, num_iotte;
+	u64 dma_mask;
+	const u32 *page_size;
+	int len;
+
+	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
+				 &len);
+	if (!ranges) {
+		pr_err(PFX "No iommu-address-ranges\n");
+		return -EINVAL;
+	}
+
+	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
+				    NULL);
+	if (!page_size) {
+		pr_err(PFX "No iommu-pagesizes\n");
+		return -EINVAL;
+	}
+
+	/* There are 4 iommu-address-ranges supported. Each range is a pair
+	 * of {base, size}. ranges[0] and ranges[1] are 32bit address space,
+	 * while ranges[2] and ranges[3] are 64bit space. We want to use the
+	 * 64bit address ranges to support 64bit addressing. Because the
+	 * 'size' of ranges[2] and ranges[3] is the same, we can select
+	 * either of them for mapping. However, since that 'size' is too
+	 * large for the OS to allocate an IOTSB for, we use a fixed size of
+	 * 32G (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
+	 * devices to share.
+	 */
+	atu->ranges = (struct atu_ranges *)ranges;
+	atu->base = atu->ranges[3].base;
+	atu->size = ATU_64_SPACE_SIZE;
+
+	/* Create IOTSB */
+	err = pci_sun4v_atu_alloc_iotsb(pbm);
+	if (err) {
+		pr_err(PFX "Error creating ATU IOTSB\n");
+		return err;
+	}
+
+	/* Create ATU iommu map.
+	 * One bit represents one iotte in IOTSB table.
+	 */
+	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
+	num_iotte = atu->size / IO_PAGE_SIZE;
+	map_size = num_iotte / 8;
+	atu->tbl.table_map_base = atu->base;
+	atu->dma_addr_mask = dma_mask;
+	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
+	if (!atu->tbl.map)
+		return -ENOMEM;
+
+	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
+			    NULL, false /* no large_pool */,
+			    0 /* default npools */,
+			    false /* want span boundary checking */);
+
+	return 0;
+}
+
 static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 {
 	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
@@ -918,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
 
 	pci_sun4v_scan_bus(pbm, &op->dev);
 
+	/* If atu_init fails, it's not a complete failure;
+	 * we can still continue using the legacy iommu.
+	 */
+	if (pbm->iommu->atu) {
+		err = pci_sun4v_atu_init(pbm);
+		if (err) {
+			kfree(pbm->iommu->atu);
+			pbm->iommu->atu = NULL;
+			pr_err(PFX "ATU init failed, err=%d\n", err);
+		}
+	}
+
 	pbm->next = pci_pbm_root;
 	pci_pbm_root = pbm;
 
@@ -931,8 +1209,10 @@ static int pci_sun4v_probe(struct platform_device *op)
 	struct pci_pbm_info *pbm;
 	struct device_node *dp;
 	struct iommu *iommu;
+	struct atu *atu;
 	u32 devhandle;
 	int i, err = -ENODEV;
+	static bool hv_atu = true;
 
 	dp = op->dev.of_node;
 
@@ -954,6 +1234,19 @@ static int pci_sun4v_probe(struct platform_device *op)
 		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
 			vpci_major, vpci_minor);
 
+		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
+		if (err) {
+			/* don't return an error if we fail to register the
+			 * ATU group, but ATU hcalls won't be available.
+			 */
+			hv_atu = false;
+			pr_err(PFX "Could not register hvapi ATU err=%d\n",
+			       err);
+		} else {
+			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+				vatu_major, vatu_minor);
+		}
+
 		dma_ops = &sun4v_dma_ops;
 	}
 
@@ -991,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op)
 	}
 
 	pbm->iommu = iommu;
+	iommu->atu = NULL;
+	if (hv_atu) {
+		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
+		if (!atu)
+			pr_err(PFX "Could not allocate atu\n");
+		else
+			iommu->atu = atu;
+	}
 
 	err = pci_sun4v_pbm_init(pbm, op, devhandle);
 	if (err)
@@ -1001,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op)
 	return 0;
 
 out_free_iommu:
+	kfree(iommu->atu);
 	kfree(pbm->iommu);
 
 out_free_controller:

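The core of the pci_sun4v.c change above is the DMA-mask dispatch: a device whose mask fits in 32 bits stays on the legacy IOMMU table, while a 64-bit-capable device is routed to the new ATU table. A minimal standalone sketch of that selection rule (simplified types and invented base addresses, not the kernel structures):

    #include <stdint.h>
    #include <stdio.h>

    struct map_table { uint64_t table_map_base; const char *name; };

    /* Hypothetical stand-ins for iommu->tbl and atu->tbl. */
    static struct map_table legacy_tbl = { 0x80000000ull,  "legacy iommu" };
    static struct map_table atu_tbl    = { 0x800000000ull, "atu" };

    /* Mirror of the patch's rule: a <= 32-bit mask keeps the legacy table. */
    static struct map_table *pick_table(uint64_t dma_mask)
    {
            return (dma_mask <= 0xffffffffull) ? &legacy_tbl : &atu_tbl;
    }

    int main(void)
    {
            printf("%s\n", pick_table(0xffffffffull)->name); /* legacy iommu */
            printf("%s\n", pick_table(~0ull)->name);         /* atu */
            return 0;
    }

Keying off *dev->dma_mask keeps the map path to a single comparison, and the unmap path applies the same split by inspecting the DMA address itself, as the dma_4v_unmap_sg hunk shows.
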
+ 21 - 0
arch/sparc/kernel/pci_sun4v.h

@@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
 				     unsigned long msinum,
 				     unsigned long valid);
 
+/* Sun4v HV IOMMU v2 APIs */
+unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
+				   unsigned long ra,
+				   unsigned long table_size,
+				   unsigned long page_size,
+				   unsigned long dvma_base,
+				   u64 *iotsb_num);
+unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
+				   unsigned long iotsb_num,
+				   unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+				  unsigned long iotsb_num,
+				  unsigned long iotsb_index_iottes,
+				  unsigned long io_attributes,
+				  unsigned long io_page_list_pa,
+				  long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+				    unsigned long iotsb_num,
+				    unsigned long iotsb_index,
+				    unsigned long iottes,
+				    unsigned long *demapped);
 #endif /* !(_PCI_SUN4V_H) */

+ 68 - 0
arch/sparc/kernel/pci_sun4v_asm.S

@@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid)
 	 mov	%o0, %o0
 ENDPROC(pci_sun4v_msg_setvalid)
 
+	/*
+	 * %o0:	devhandle
+	 * %o1:	r_addr
+	 * %o2:	size
+	 * %o3:	pagesize
+	 * %o4:	virt
+	 * %o5: &iotsb_num/&iotsb_handle
+	 *
+	 * returns %o0:	status
+	 *         %o1:	iotsb_num/iotsb_handle
+	 */
+ENTRY(pci_sun4v_iotsb_conf)
+	mov	%o5, %g1
+	mov	HV_FAST_PCI_IOTSB_CONF, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_conf)
+
+	/*
+	 * %o0:	devhandle
+	 * %o1:	iotsb_num/iotsb_handle
+	 * %o2:	pci_device
+	 *
+	 * returns %o0:	status
+	 */
+ENTRY(pci_sun4v_iotsb_bind)
+	mov	HV_FAST_PCI_IOTSB_BIND, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(pci_sun4v_iotsb_bind)
+
+	/*
+	 * %o0:	devhandle
+	 * %o1:	iotsb_num/iotsb_handle
+	 * %o2:	index_count
+	 * %o3:	iotte_attributes
+	 * %o4:	io_page_list_p
+	 * %o5: &mapped
+	 *
+	 * returns %o0:	status
+	 *         %o1:	#mapped
+	 */
+ENTRY(pci_sun4v_iotsb_map)
+	mov	%o5, %g1
+	mov	HV_FAST_PCI_IOTSB_MAP, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_map)
+
+	/*
+	 * %o0:	devhandle
+	 * %o1:	iotsb_num/iotsb_handle
+	 * %o2:	iotsb_index
+	 * %o3:	#iottes
+	 * %o4: &demapped
+	 *
+	 * returns %o0:	status
+	 *         %o1:	#demapped
+	 */
+ENTRY(pci_sun4v_iotsb_demap)
+	mov	HV_FAST_PCI_IOTSB_DEMAP, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%o4]
+ENDPROC(pci_sun4v_iotsb_demap)

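The block comments above spell out the sun4v fast-trap convention: arguments in %o0-%o5, the hypervisor function number loaded into %o5 before `ta HV_FAST_TRAP`, a status code returned in %o0, and any out-value in %o1, which the wrapper stores through the pointer originally passed in %o5. A plain-C mock of the pci_sun4v_iotsb_conf contract (illustrative only; the status values below are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define HV_EOK 0UL      /* invented stand-in for the real HV status */

    /* Mock of the hypercall contract: the return value models %o0
     * (status); the new IOTSB handle models %o1, stored through the
     * pointer the asm wrapper received in %o5. */
    static unsigned long mock_iotsb_conf(unsigned long devhandle,
                                         unsigned long ra,
                                         unsigned long table_size,
                                         unsigned long page_size,
                                         unsigned long dvma_base,
                                         uint64_t *iotsb_num)
    {
            (void)devhandle; (void)ra; (void)page_size; (void)dvma_base;
            if (table_size == 0)
                    return 22;      /* invented nonzero error status */
            *iotsb_num = 1;         /* handle handed back by the HV */
            return HV_EOK;
    }

    int main(void)
    {
            uint64_t iotsb_num = 0;
            unsigned long err;

            err = mock_iotsb_conf(0x100, 0x40000000UL, 0x10000UL,
                                  8192, 0x80000000UL, &iotsb_num);
            printf("status=%lu iotsb_num=%llu\n",
                   err, (unsigned long long)iotsb_num);
            return 0;
    }
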
+ 2 - 2
arch/sparc/kernel/signal_32.c

@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
 	/* 1. Make sure we are not getting garbage from the user */
-	if (!invalid_frame_pointer(sf, sizeof(*sf)))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv_and_exit;
 
 	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 
 	synchronize_user_stack();
 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
-	if (!invalid_frame_pointer(sf, sizeof(*sf)))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv;
 
 	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))

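The signal_32.c fix is a polarity bug: invalid_frame_pointer() returns true when the user frame is unusable, so the old negated test sent valid frames down the SIGSEGV path. A minimal sketch of the corrected guard (predicate mocked; the alignment rule is invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mock: true means the user frame pointer is unusable. */
    static bool invalid_frame_pointer(void *sf, unsigned long size)
    {
            return sf == NULL || ((unsigned long)sf & (size - 1));
    }

    static int sigreturn(void *sf)
    {
            if (invalid_frame_pointer(sf, 8))   /* fixed: no negation */
                    return -1;                  /* segv_and_exit path */
            return 0;                           /* proceed with restore */
    }

    int main(void)
    {
            printf("%d %d\n", sigreturn((void *)0x1000), sigreturn(NULL));
            return 0;   /* prints "0 -1": valid frame ok, bad frame segv */
    }
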
+ 64 - 7
arch/sparc/mm/init_64.c

@@ -802,8 +802,10 @@ struct mdesc_mblock {
 };
 static struct mdesc_mblock *mblocks;
 static int num_mblocks;
+static int find_numa_node_for_addr(unsigned long pa,
+				   struct node_mem_mask *pnode_mask);
 
-static unsigned long ra_to_pa(unsigned long addr)
+static unsigned long __init ra_to_pa(unsigned long addr)
 {
 	int i;
 
@@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr)
 	return addr;
 }
 
-static int find_node(unsigned long addr)
+static int __init find_node(unsigned long addr)
 {
+	static bool search_mdesc = true;
+	static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
+	static int last_index;
 	int i;
 
 	addr = ra_to_pa(addr);
@@ -830,13 +835,30 @@ static int find_node(unsigned long addr)
 		if ((addr & p->mask) == p->val)
 			return i;
 	}
-	/* The following condition has been observed on LDOM guests.*/
-	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
-		" rule. Some physical memory will be owned by node 0.");
-	return 0;
+	/* The following condition has been observed on LDOM guests because
+	 * node_masks only contains the best latency mask and value.
+	 * An LDOM guest's mdesc can contain a single latency group to
+	 * cover multiple address ranges. Print a warning message only if the
+	 * address can be found in neither node_masks nor the mdesc.
+	 */
+	if ((search_mdesc) &&
+	    ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
+		/* find the available node in the mdesc */
+		last_index = find_numa_node_for_addr(addr, &last_mem_mask);
+		numadbg("find_node: latency group for address 0x%lx is %d\n",
+			addr, last_index);
+		if ((last_index < 0) || (last_index >= num_node_masks)) {
+			/* WARN_ONCE() and use default group 0 */
+			WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
+			search_mdesc = false;
+			last_index = 0;
+		}
+	}
+
+	return last_index;
 }
 
-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = find_node(start);
 	start += PAGE_SIZE;
@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
 	return numa_latency[from][to];
 }
 
+static int find_numa_node_for_addr(unsigned long pa,
+				   struct node_mem_mask *pnode_mask)
+{
+	struct mdesc_handle *md = mdesc_grab();
+	u64 node, arc;
+	int i = 0;
+
+	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+	if (node == MDESC_NODE_NULL)
+		goto out;
+
+	mdesc_for_each_node_by_name(md, node, "group") {
+		mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
+			u64 target = mdesc_arc_target(md, arc);
+			struct mdesc_mlgroup *m = find_mlgroup(target);
+
+			if (!m)
+				continue;
+			if ((pa & m->mask) == m->match) {
+				if (pnode_mask) {
+					pnode_mask->mask = m->mask;
+					pnode_mask->val = m->match;
+				}
+				mdesc_release(md);
+				return i;
+			}
+		}
+		i++;
+	}
+
+out:
+	mdesc_release(md);
+	return -1;
+}
+
 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
 	int i;

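find_node() above becomes a memoizing lookup: the {mask, val} pair of the last successful mdesc search is cached in last_mem_mask, and any address that still matches it reuses last_index without re-walking the mdesc. The caching pattern in isolation (mock lookup, invented mask layout):

    #include <stdint.h>
    #include <stdio.h>

    struct node_mem_mask { uint64_t mask, val; };

    /* Mock of the expensive mdesc walk: node = bits [43:40] of the
     * address. Purely illustrative, not the kernel's latency groups. */
    static int slow_lookup(uint64_t pa, struct node_mem_mask *m)
    {
            m->mask = 0xf0000000000ull;
            m->val  = pa & m->mask;
            return (int)((pa >> 40) & 0xf);
    }

    static int find_node(uint64_t addr)
    {
            static struct node_mem_mask last = { ~0ull, ~0ull };
            static int last_index;

            if ((addr & last.mask) != last.val)     /* cache miss */
                    last_index = slow_lookup(addr, &last);
            return last_index;
    }

    int main(void)
    {
            printf("%d\n", find_node(0x10000000000ull)); /* miss -> 1 */
            printf("%d\n", find_node(0x10000001000ull)); /* hit  -> 1 */
            printf("%d\n", find_node(0x20000000000ull)); /* miss -> 2 */
            return 0;
    }
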
+ 3 - 0
arch/tile/include/asm/cache.h

@@ -61,4 +61,7 @@
  */
 #define __write_once __read_mostly
 
+/* __ro_after_init is the generic name for the tile arch __write_once. */
+#define __ro_after_init __read_mostly
+
 #endif /* _ASM_TILE_CACHE_H */

+ 2 - 2
arch/x86/crypto/aesni-intel_glue.c

@@ -888,7 +888,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
 	struct scatter_walk src_sg_walk;
-	struct scatter_walk dst_sg_walk;
+	struct scatter_walk dst_sg_walk = {};
 	unsigned int i;
 
 	/* Assuming we are supporting rfc4106 64-bit extended */
@@ -968,7 +968,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
 	u8 authTag[16];
 	struct scatter_walk src_sg_walk;
-	struct scatter_walk dst_sg_walk;
+	struct scatter_walk dst_sg_walk = {};
 	unsigned int i;
 
 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))

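The aesni change gives dst_sg_walk an empty initializer because the function only assigns it on the out-of-place path; on the in-place path the struct would otherwise be read uninitialized at cleanup. The idiom, reduced to a standalone example (`{0}` used here as the portable spelling of the kernel's GNU-C `{}`):

    #include <stdio.h>

    struct scatter_walk_like { void *sg; unsigned int offset; };

    int main(void)
    {
            /* Empty-ish initializer: every member starts zeroed, even
             * on code paths that never assign to the struct. */
            struct scatter_walk_like dst = {0};

            printf("%p %u\n", dst.sg, dst.offset);  /* (nil) 0 */
            return 0;
    }
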
+ 28 - 4
arch/x86/events/intel/uncore_snb.c

@@ -8,8 +8,12 @@
 #define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
 #define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
-#define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
-#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x1904
+#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC	0x190c
+#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC	0x1900
+#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC	0x1910
+#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC	0x190f
+#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC	0x191f
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
@@ -616,13 +620,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
 
 static const struct pci_device_id skl_uncore_pci_ids[] = {
 	{ /* IMC */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
 	},
 	{ /* IMC */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
 		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
 	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
 
 	{ /* end: all zeroes */ },
 };
@@ -666,8 +686,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
 	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
 	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
 	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
-	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
+	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
 	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
+	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
+	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
+	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
+	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
 	{  /* end marker */ }
 };
 

+ 1 - 0
arch/x86/include/asm/intel-mid.h

@@ -17,6 +17,7 @@
 
 extern int intel_mid_pci_init(void);
 extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev);
 
 extern void intel_mid_pwr_power_off(void);
 

+ 4 - 1
arch/x86/kernel/apm_32.c

@@ -1042,8 +1042,11 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
 
 	if (apm_info.get_power_status_broken)
 		return APM_32_UNSUPPORTED;
-	if (apm_bios_call(&call))
+	if (apm_bios_call(&call)) {
+		if (!call.err)
+			return APM_NO_ERROR;
 		return call.err;
+	}
 	*status = call.ebx;
 	*bat = call.ecx;
 	if (apm_info.get_power_status_swabinminutes) {

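apm_get_power_status() previously returned call.err even when apm_bios_call() failed without setting a BIOS error code, so a failed call could read as success (0). The patch remaps that case to APM_NO_ERROR. A standalone sketch of the mapping (the APM_NO_ERROR value below is a made-up placeholder, not the kernel's constant):

    #include <stdio.h>

    #define APM_SUCCESS  0
    #define APM_NO_ERROR 0x53       /* hypothetical sentinel value */

    struct bios_call { int err; };

    /* Mock BIOS call: fails without setting a BIOS error code. */
    static int bios_call(struct bios_call *call)
    {
            call->err = 0;
            return 1;               /* nonzero: the call itself failed */
    }

    static int get_power_status(void)
    {
            struct bios_call call;

            if (bios_call(&call)) {
                    if (!call.err)          /* failure, but err reads as */
                            return APM_NO_ERROR;    /* success: remap it */
                    return call.err;
            }
            return APM_SUCCESS;
    }

    int main(void)
    {
            printf("0x%x\n", get_power_status());   /* 0x53, not 0 */
            return 0;
    }
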
+ 1 - 5
arch/x86/kernel/cpu/amd.c

@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	unsigned bits;
 	int cpu = smp_processor_id();
-	unsigned int socket_id, core_complex_id;
 
 	bits = c->x86_coreid_bits;
 	/* Low order bits define the core id (index of core in socket) */
@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 	 if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
 		return;
 
-	socket_id	= (c->apicid >> bits) - 1;
-	core_complex_id	= (c->apicid & ((1 << bits) - 1)) >> 3;
-
-	per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
+	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
 #endif
 }
 

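The amd.c simplification relies on each Fam17h core complex (CCX) spanning eight APIC IDs, so the shared-LLC id is just the APIC ID with its low three bits dropped; the old socket/complex recombination (with its off-by-one "- 1") is unnecessary. Numerically (values invented):

    #include <stdio.h>

    int main(void)
    {
            /* Each AMD Fam17h CCX covers 8 APIC IDs, so the shared-LLC
             * id is the APIC ID sans its low 3 bits. */
            unsigned int apicids[] = { 0, 5, 8, 11, 16 };

            for (unsigned int i = 0; i < 5; i++)
                    printf("apicid %2u -> llc_id %u\n",
                           apicids[i], apicids[i] >> 3);
            /* 0,5 -> 0; 8,11 -> 1; 16 -> 2 */
            return 0;
    }
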
+ 30 - 2
arch/x86/kernel/cpu/common.c

@@ -978,6 +978,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
 	}
 }
 
+/*
+ * The physical to logical package id mapping is initialized from the
+ * acpi/mptables information. Make sure that CPUID actually agrees with
+ * that.
+ */
+static void sanitize_package_id(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	unsigned int pkg, apicid, cpu = smp_processor_id();
+
+	apicid = apic->cpu_present_to_apicid(cpu);
+	pkg = apicid >> boot_cpu_data.x86_coreid_bits;
+
+	if (apicid != c->initial_apicid) {
+		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
+		       cpu, apicid, c->initial_apicid);
+		c->initial_apicid = apicid;
+	}
+	if (pkg != c->phys_proc_id) {
+		pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
+		       cpu, pkg, c->phys_proc_id);
+		c->phys_proc_id = pkg;
+	}
+	c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
+#else
+	c->logical_proc_id = 0;
+#endif
+}
+
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
-	/* The boot/hotplug time assigment got cleared, restore it */
-	c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
+	sanitize_package_id(c);
 }
 
 /*

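sanitize_package_id() above cross-checks the firmware-provided package id against the one CPUID topology implies, apicid >> x86_coreid_bits, and prefers the APIC-derived value on mismatch. The check in miniature (widths and ids invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned int coreid_bits = 4;   /* 16 APIC IDs per package */
            unsigned int apicid = 0x23;     /* from CPUID */
            unsigned int fw_pkg = 1;        /* what ACPI/mptables claimed */
            unsigned int pkg = apicid >> coreid_bits;   /* 0x23 >> 4 = 2 */

            if (pkg != fw_pkg)
                    printf("FW_BUG: overriding firmware package id %u with %u\n",
                           fw_pkg, pkg);
            return 0;
    }
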
+ 27 - 31
arch/x86/kvm/irq_comm.c

@@ -156,6 +156,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 }
 
 
+static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
+		    struct kvm *kvm, int irq_source_id, int level,
+		    bool line_status)
+{
+	if (!level)
+		return -1;
+
+	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
+}
+
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
 			      struct kvm *kvm, int irq_source_id, int level,
 			      bool line_status)
@@ -163,18 +173,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
 	struct kvm_lapic_irq irq;
 	int r;
 
-	if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
-		return -EWOULDBLOCK;
+	switch (e->type) {
+	case KVM_IRQ_ROUTING_HV_SINT:
+		return kvm_hv_set_sint(e, kvm, irq_source_id, level,
+				       line_status);
 
-	if (kvm_msi_route_invalid(kvm, e))
-		return -EINVAL;
+	case KVM_IRQ_ROUTING_MSI:
+		if (kvm_msi_route_invalid(kvm, e))
+			return -EINVAL;
 
-	kvm_set_msi_irq(kvm, e, &irq);
+		kvm_set_msi_irq(kvm, e, &irq);
 
-	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
-		return r;
-	else
-		return -EWOULDBLOCK;
+		if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
+			return r;
+		break;
+
+	default:
+		break;
+	}
+
+	return -EWOULDBLOCK;
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
@@ -254,16 +272,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
 	srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
-static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
-		    struct kvm *kvm, int irq_source_id, int level,
-		    bool line_status)
-{
-	if (!level)
-		return -1;
-
-	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
-}
-
 int kvm_set_routing_entry(struct kvm *kvm,
 			  struct kvm_kernel_irq_routing_entry *e,
 			  const struct kvm_irq_routing_entry *ue)
@@ -423,18 +431,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
 	srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
-int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm,
-		     int irq_source_id, int level, bool line_status)
-{
-	switch (irq->type) {
-	case KVM_IRQ_ROUTING_HV_SINT:
-		return kvm_hv_set_sint(irq, kvm, irq_source_id, level,
-				       line_status);
-	default:
-		return -EWOULDBLOCK;
-	}
-}
-
 void kvm_arch_irq_routing_update(struct kvm *kvm)
 {
 	kvm_hv_irq_routing_update(kvm);

+ 34 - 13
arch/x86/kvm/x86.c

@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 	struct kvm_shared_msrs *locals
 		= container_of(urn, struct kvm_shared_msrs, urn);
 	struct kvm_shared_msr_values *values;
+	unsigned long flags;
 
+	/*
+	 * Disabling irqs at this point since the following code could be
+	 * interrupted and executed through kvm_arch_hardware_disable()
+	 */
+	local_irq_save(flags);
+	if (locals->registered) {
+		locals->registered = false;
+		user_return_notifier_unregister(urn);
+	}
+	local_irq_restore(flags);
 	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
 		values = &locals->values[slot];
 		if (values->host != values->curr) {
@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 			values->curr = values->host;
 		}
 	}
-	locals->registered = false;
-	user_return_notifier_unregister(urn);
 }
 
 static void shared_msr_update(unsigned slot, u32 msr)
@@ -1724,18 +1733,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 
 static u64 __get_kvmclock_ns(struct kvm *kvm)
 {
-	struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
 	struct kvm_arch *ka = &kvm->arch;
-	s64 ns;
+	struct pvclock_vcpu_time_info hv_clock;
 
-	if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
-		u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
-		ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
-	} else {
-		ns = ktime_get_boot_ns() + ka->kvmclock_offset;
+	spin_lock(&ka->pvclock_gtod_sync_lock);
+	if (!ka->use_master_clock) {
+		spin_unlock(&ka->pvclock_gtod_sync_lock);
+		return ktime_get_boot_ns() + ka->kvmclock_offset;
 	}
 
-	return ns;
+	hv_clock.tsc_timestamp = ka->master_cycle_now;
+	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
+	spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+	kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+			   &hv_clock.tsc_shift,
+			   &hv_clock.tsc_to_system_mul);
+	return __pvclock_read_cycles(&hv_clock, rdtsc());
 }
 
 u64 get_kvmclock_ns(struct kvm *kvm)
@@ -2596,7 +2610,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_PIT_STATE2:
 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
 	case KVM_CAP_XEN_HVM:
-	case KVM_CAP_ADJUST_CLOCK:
 	case KVM_CAP_VCPU_EVENTS:
 	case KVM_CAP_HYPERV:
 	case KVM_CAP_HYPERV_VAPIC:
@@ -2623,6 +2636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #endif
 		r = 1;
 		break;
+	case KVM_CAP_ADJUST_CLOCK:
+		r = KVM_CLOCK_TSC_STABLE;
+		break;
 	case KVM_CAP_X86_SMM:
 		/* SMBASE is usually relocated above 1M on modern chipsets,
 		 * and SMM handlers might indeed rely on 4G segment limits,
@@ -3415,6 +3431,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	};
 	case KVM_SET_VAPIC_ADDR: {
 		struct kvm_vapic_addr va;
+		int idx;
 
 		r = -EINVAL;
 		if (!lapic_in_kernel(vcpu))
@@ -3422,7 +3439,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&va, argp, sizeof va))
 			goto out;
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
 	case KVM_X86_SETUP_MCE: {
@@ -4103,9 +4122,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		struct kvm_clock_data user_ns;
 		u64 now_ns;
 
-		now_ns = get_kvmclock_ns(kvm);
+		local_irq_disable();
+		now_ns = __get_kvmclock_ns(kvm);
 		user_ns.clock = now_ns;
-		user_ns.flags = 0;
+		user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
+		local_irq_enable();
 		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;

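__get_kvmclock_ns() now snapshots master_cycle_now/master_kernel_ns under pvclock_gtod_sync_lock and lets the pvclock math scale the TSC delta: shift the delta by tsc_shift, multiply by the 32.32 fixed-point tsc_to_system_mul, and take the top 32 bits. A standalone sketch of that scaling step (constants invented; this stands in for __pvclock_read_cycles, it is not a copy of it):

    #include <stdint.h>
    #include <stdio.h>

    /* Scale a TSC delta to ns with a 32.32 fixed-point multiplier
     * and a binary pre-shift, pvclock style. */
    static uint64_t pvclock_ns(uint64_t system_time, uint64_t tsc_timestamp,
                               int8_t shift, uint32_t mul, uint64_t tsc)
    {
            uint64_t delta = tsc - tsc_timestamp;

            if (shift >= 0)
                    delta <<= shift;
            else
                    delta >>= -shift;
            return system_time + ((delta * mul) >> 32);
    }

    int main(void)
    {
            /* 3 GHz TSC: mul chosen so 3 cycles ~= 1 ns (2^32 / 3). */
            uint32_t mul = 1431655765u;
            uint64_t ns = pvclock_ns(1000000, 0, 0, mul, 3000000000ull);

            printf("%llu\n", (unsigned long long)ns);  /* ~1001000000 ns */
            return 0;
    }
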
+ 1 - 1
arch/x86/platform/efi/efi.c

@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void)
 	int count = 0, pg_shift = 0;
 	void *new_memmap = NULL;
 	efi_status_t status;
-	phys_addr_t pa;
+	unsigned long pa;
 
 	efi.systab = NULL;
 

+ 57 - 23
arch/x86/platform/efi/efi_64.c

@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/reboot.h>
 #include <linux/slab.h>
+#include <linux/ucs2_string.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void)
 	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
 }
 
+/*
+ * Wrapper for slow_virt_to_phys() that handles NULL addresses.
+ */
+static inline phys_addr_t
+virt_to_phys_or_null_size(void *va, unsigned long size)
+{
+	bool bad_size;
+
+	if (!va)
+		return 0;
+
+	if (virt_addr_valid(va))
+		return virt_to_phys(va);
+
+	/*
+	 * A fully aligned variable on the stack is guaranteed not to
+	 * cross a page boundary. Try to catch strings on the stack by
+	 * checking that 'size' is a power of two.
+	 */
+	bad_size = size > PAGE_SIZE || !is_power_of_2(size);
+
+	WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
+
+	return slow_virt_to_phys(va);
+}
+
+#define virt_to_phys_or_null(addr)				\
+	virt_to_phys_or_null_size((addr), sizeof(*(addr)))
+
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
 	unsigned long pfn, text;
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 
 	spin_lock(&rtc_lock);
 
-	phys_tm = virt_to_phys(tm);
-	phys_tc = virt_to_phys(tc);
+	phys_tm = virt_to_phys_or_null(tm);
+	phys_tc = virt_to_phys_or_null(tc);
 
 	status = efi_thunk(get_time, phys_tm, phys_tc);
 
@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
 
 	spin_lock(&rtc_lock);
 
-	phys_tm = virt_to_phys(tm);
+	phys_tm = virt_to_phys_or_null(tm);
 
 	status = efi_thunk(set_time, phys_tm);
 
@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
 
 	spin_lock(&rtc_lock);
 
-	phys_enabled = virt_to_phys(enabled);
-	phys_pending = virt_to_phys(pending);
-	phys_tm = virt_to_phys(tm);
+	phys_enabled = virt_to_phys_or_null(enabled);
+	phys_pending = virt_to_phys_or_null(pending);
+	phys_tm = virt_to_phys_or_null(tm);
 
 	status = efi_thunk(get_wakeup_time, phys_enabled,
 			     phys_pending, phys_tm);
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
 	spin_lock(&rtc_lock);
 
-	phys_tm = virt_to_phys(tm);
+	phys_tm = virt_to_phys_or_null(tm);
 
 	status = efi_thunk(set_wakeup_time, enabled, phys_tm);
 
@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 	return status;
 }
 
+static unsigned long efi_name_size(efi_char16_t *name)
+{
+	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
+}
 
 static efi_status_t
 efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
 	u32 phys_name, phys_vendor, phys_attr;
 	u32 phys_data_size, phys_data;
 
-	phys_data_size = virt_to_phys(data_size);
-	phys_vendor = virt_to_phys(vendor);
-	phys_name = virt_to_phys(name);
-	phys_attr = virt_to_phys(attr);
-	phys_data = virt_to_phys(data);
+	phys_data_size = virt_to_phys_or_null(data_size);
+	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+	phys_attr = virt_to_phys_or_null(attr);
+	phys_data = virt_to_phys_or_null_size(data, *data_size);
 
 	status = efi_thunk(get_variable, phys_name, phys_vendor,
 			   phys_attr, phys_data_size, phys_data);
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 	u32 phys_name, phys_vendor, phys_data;
 	efi_status_t status;
 
-	phys_name = virt_to_phys(name);
-	phys_vendor = virt_to_phys(vendor);
-	phys_data = virt_to_phys(data);
+	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_data = virt_to_phys_or_null_size(data, data_size);
 
 	/* If data_size is > sizeof(u32) we've got problems */
 	status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
 	efi_status_t status;
 	u32 phys_name_size, phys_name, phys_vendor;
 
-	phys_name_size = virt_to_phys(name_size);
-	phys_vendor = virt_to_phys(vendor);
-	phys_name = virt_to_phys(name);
+	phys_name_size = virt_to_phys_or_null(name_size);
+	phys_vendor = virt_to_phys_or_null(vendor);
+	phys_name = virt_to_phys_or_null_size(name, *name_size);
 
 	status = efi_thunk(get_next_variable, phys_name_size,
 			   phys_name, phys_vendor);
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count)
 	efi_status_t status;
 	u32 phys_count;
 
-	phys_count = virt_to_phys(count);
+	phys_count = virt_to_phys_or_null(count);
 	status = efi_thunk(get_next_high_mono_count, phys_count);
 
 	return status;
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
 {
 	u32 phys_data;
 
-	phys_data = virt_to_phys(data);
+	phys_data = virt_to_phys_or_null_size(data, data_size);
 
 	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
 }
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
 	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
 		return EFI_UNSUPPORTED;
 
-	phys_storage = virt_to_phys(storage_space);
-	phys_remaining = virt_to_phys(remaining_space);
-	phys_max = virt_to_phys(max_variable_size);
+	phys_storage = virt_to_phys_or_null(storage_space);
+	phys_remaining = virt_to_phys_or_null(remaining_space);
+	phys_max = virt_to_phys_or_null(max_variable_size);
 
 	status = efi_thunk(query_variable_info, attr, phys_storage,
 			   phys_remaining, phys_max);

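virt_to_phys_or_null_size() above leans on an alignment argument: an object of power-of-two size n <= PAGE_SIZE that is n-aligned cannot straddle a page boundary, so a single-page physical lookup on its first byte is valid for the whole object. The predicate extracted as a standalone check (PAGE_SIZE assumed 4096):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    static bool is_power_of_2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    /* True when an object of this size/alignment may cross a page. */
    static bool may_cross_page(uintptr_t va, unsigned long size)
    {
            bool bad_size = size > PAGE_SIZE || !is_power_of_2(size);

            return bad_size || (va & (size - 1)) != 0;
    }

    int main(void)
    {
            printf("%d\n", may_cross_page(0x1008, 8));   /* 0: safe */
            printf("%d\n", may_cross_page(0x1ffc, 8));   /* 1: misaligned */
            printf("%d\n", may_cross_page(0x1000, 24));  /* 1: not pow2 */
            return 0;
    }
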
+ 19 - 0
arch/x86/platform/intel-mid/pwr.c

@@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
 }
 EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
 
+pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
+{
+	struct mid_pwr *pwr = midpwr;
+	int id, reg, bit;
+	u32 power;
+
+	if (!pwr || !pwr->available)
+		return PCI_UNKNOWN;
+
+	id = intel_mid_pwr_get_lss_id(pdev);
+	if (id < 0)
+		return PCI_UNKNOWN;
+
+	reg = (id * LSS_PWS_BITS) / 32;
+	bit = (id * LSS_PWS_BITS) % 32;
+	power = mid_pwr_get_state(pwr, reg);
+	return (__force pci_power_t)((power >> bit) & 3);
+}
+
 void intel_mid_pwr_power_off(void)
 {
 	struct mid_pwr *pwr = midpwr;

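intel_mid_pci_get_power_state() unpacks a register file that stores one 2-bit power state per logical subsystem (LSS): (id * LSS_PWS_BITS) / 32 selects the 32-bit word, the remainder gives the bit offset, and `(power >> bit) & 3` extracts the state. Worked through with invented register contents:

    #include <stdint.h>
    #include <stdio.h>

    #define LSS_PWS_BITS 2      /* two power-state bits per subsystem */

    int main(void)
    {
            /* Invented register contents: word 0 holds LSS 0..15. */
            uint32_t regs[2] = { 0x000000c4, 0x00000003 };
            int ids[] = { 0, 1, 2, 3, 16 };

            for (int i = 0; i < 5; i++) {
                    int id  = ids[i];
                    int reg = (id * LSS_PWS_BITS) / 32;   /* word index */
                    int bit = (id * LSS_PWS_BITS) % 32;   /* bit offset */
                    unsigned int state = (regs[reg] >> bit) & 3;

                    printf("lss %2d -> reg %d bit %2d state %u\n",
                           id, reg, bit, state);
            }
            return 0;
    }
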
+ 1 - 0
arch/x86/purgatory/Makefile

@@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n
 
 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
 KBUILD_CFLAGS += -m$(BITS)
+KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
 		$(call if_changed,ld)

+ 8 - 1
arch/xtensa/include/uapi/asm/unistd.h

@@ -767,7 +767,14 @@ __SYSCALL(346, sys_preadv2, 6)
 #define __NR_pwritev2				347
 __SYSCALL(347, sys_pwritev2, 6)
 
-#define __NR_syscall_count			348
+#define __NR_pkey_mprotect			348
+__SYSCALL(348, sys_pkey_mprotect, 4)
+#define __NR_pkey_alloc				349
+__SYSCALL(349, sys_pkey_alloc, 2)
+#define __NR_pkey_free				350
+__SYSCALL(350, sys_pkey_free, 1)
+
+#define __NR_syscall_count			351
 
 /*
  * sysxtensa syscall handler

+ 7 - 7
arch/xtensa/kernel/time.c

@@ -172,10 +172,11 @@ void __init time_init(void)
 {
 	of_clk_init(NULL);
 #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
-	printk("Calibrating CPU frequency ");
+	pr_info("Calibrating CPU frequency ");
 	calibrate_ccount();
-	printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
-			(int)(ccount_freq/10000)%100);
+	pr_cont("%d.%02d MHz\n",
+		(int)ccount_freq / 1000000,
+		(int)(ccount_freq / 10000) % 100);
 #else
 	ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
 #endif
@@ -210,9 +211,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 void calibrate_delay(void)
 {
 	loops_per_jiffy = ccount_freq / HZ;
-	printk("Calibrating delay loop (skipped)... "
-	       "%lu.%02lu BogoMIPS preset\n",
-	       loops_per_jiffy/(1000000/HZ),
-	       (loops_per_jiffy/(10000/HZ)) % 100);
+	pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
+		loops_per_jiffy / (1000000 / HZ),
+		(loops_per_jiffy / (10000 / HZ)) % 100);
 }
 #endif

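The calibration printout derives "xx.yy MHz" from a Hz count with integer arithmetic only: freq / 1000000 is the whole-MHz part, and (freq / 10000) % 100 gives the two hundredths digits. For example:

    #include <stdio.h>

    int main(void)
    {
            unsigned long ccount_freq = 98765432;   /* Hz */

            /* 98765432 / 1000000 = 98; (98765432 / 10000) % 100 = 76 */
            printf("%d.%02d MHz\n",
                   (int)(ccount_freq / 1000000),
                   (int)((ccount_freq / 10000) % 100));
            return 0;   /* prints "98.76 MHz" */
    }
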
+ 22 - 52
arch/xtensa/kernel/traps.c

@@ -465,26 +465,25 @@ void show_regs(struct pt_regs * regs)
 
 	for (i = 0; i < 16; i++) {
 		if ((i % 8) == 0)
-			printk(KERN_INFO "a%02d:", i);
-		printk(KERN_CONT " %08lx", regs->areg[i]);
+			pr_info("a%02d:", i);
+		pr_cont(" %08lx", regs->areg[i]);
 	}
-	printk(KERN_CONT "\n");
-
-	printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
-	       regs->pc, regs->ps, regs->depc, regs->excvaddr);
-	printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
-	       regs->lbeg, regs->lend, regs->lcount, regs->sar);
+	pr_cont("\n");
+	pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
+		regs->pc, regs->ps, regs->depc, regs->excvaddr);
+	pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
+		regs->lbeg, regs->lend, regs->lcount, regs->sar);
 	if (user_mode(regs))
-		printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
-		       regs->windowbase, regs->windowstart, regs->wmask,
-		       regs->syscall);
+		pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
+			regs->windowbase, regs->windowstart, regs->wmask,
+			regs->syscall);
 }
 
 static int show_trace_cb(struct stackframe *frame, void *data)
 {
 	if (kernel_text_address(frame->pc)) {
-		printk(" [<%08lx>] ", frame->pc);
-		print_symbol("%s\n", frame->pc);
+		pr_cont(" [<%08lx>]", frame->pc);
+		print_symbol(" %s\n", frame->pc);
 	}
 	return 0;
 }
@@ -494,19 +493,13 @@ void show_trace(struct task_struct *task, unsigned long *sp)
 	if (!sp)
 		sp = stack_pointer(task);
 
-	printk("Call Trace:");
-#ifdef CONFIG_KALLSYMS
-	printk("\n");
-#endif
+	pr_info("Call Trace:\n");
 	walk_stackframe(sp, show_trace_cb, NULL);
-	printk("\n");
+#ifndef CONFIG_KALLSYMS
+	pr_cont("\n");
+#endif
 }
 
-/*
- * This routine abuses get_user()/put_user() to reference pointers
- * with at least a bit of error checking ...
- */
-
 static int kstack_depth_to_print = 24;
 
 void show_stack(struct task_struct *task, unsigned long *sp)
@@ -518,52 +511,29 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 		sp = stack_pointer(task);
 	stack = sp;
 
-	printk("\nStack: ");
+	pr_info("Stack:\n");
 
 	for (i = 0; i < kstack_depth_to_print; i++) {
 		if (kstack_end(sp))
 			break;
-		if (i && ((i % 8) == 0))
-			printk("\n       ");
-		printk("%08lx ", *sp++);
+		pr_cont(" %08lx", *sp++);
+		if (i % 8 == 7)
+			pr_cont("\n");
 	}
-	printk("\n");
 	show_trace(task, stack);
 }
 
-void show_code(unsigned int *pc)
-{
-	long i;
-
-	printk("\nCode:");
-
-	for(i = -3 ; i < 6 ; i++) {
-		unsigned long insn;
-		if (__get_user(insn, pc + i)) {
-			printk(" (Bad address in pc)\n");
-			break;
-		}
-		printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
-	}
-}
-
 DEFINE_SPINLOCK(die_lock);
 
 void die(const char * str, struct pt_regs * regs, long err)
 {
 	static int die_counter;
-	int nl = 0;
 
 	console_verbose();
 	spin_lock_irq(&die_lock);
 
-	printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
-#ifdef CONFIG_PREEMPT
-	printk("PREEMPT ");
-	nl = 1;
-#endif
-	if (nl)
-		printk("\n");
+	pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
+		IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
 	show_regs(regs);
 	if (!user_mode(regs))
 		show_stack(NULL, (unsigned long*)regs->areg[1]);

Too many files changed in this diff, so some files are not shown.