Merge branch 'linus' into x86/fpu, to resolve conflicts

 Conflicts:
	arch/x86/kernel/fpu/core.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 8 years ago
parent
commit
064e6a8ba6
100 changed files with 824 additions and 501 deletions
  1. Documentation/ABI/testing/sysfs-devices-system-ibm-rtl (+2 -2)
  2. Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt (+2 -2)
  3. Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt (+5 -0)
  4. Documentation/devicetree/bindings/pci/rockchip-pcie.txt (+8 -3)
  5. Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt (+5 -5)
  6. Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt (+1 -1)
  7. Documentation/filesystems/Locking (+0 -1)
  8. Documentation/filesystems/vfs.txt (+0 -1)
  9. Documentation/i2c/i2c-topology (+2 -2)
  10. Documentation/networking/dsa/dsa.txt (+2 -1)
  11. Documentation/virtual/kvm/api.txt (+11 -0)
  12. Documentation/virtual/kvm/locking.txt (+11 -1)
  13. MAINTAINERS (+20 -1)
  14. Makefile (+10 -7)
  15. arch/arc/Makefile (+6 -1)
  16. arch/arc/boot/dts/axc001.dtsi (+1 -1)
  17. arch/arc/boot/dts/nsim_700.dts (+1 -1)
  18. arch/arc/boot/dts/nsimosci.dts (+4 -0)
  19. arch/arc/configs/nsim_700_defconfig (+1 -0)
  20. arch/arc/configs/nsim_hs_defconfig (+1 -0)
  21. arch/arc/configs/nsim_hs_smp_defconfig (+1 -0)
  22. arch/arc/configs/nsimosci_defconfig (+1 -0)
  23. arch/arc/configs/nsimosci_hs_defconfig (+1 -0)
  24. arch/arc/configs/nsimosci_hs_smp_defconfig (+1 -2)
  25. arch/arc/include/asm/arcregs.h (+2 -0)
  26. arch/arc/include/asm/smp.h (+2 -2)
  27. arch/arc/kernel/devtree.c (+2 -0)
  28. arch/arc/kernel/mcip.c (+20 -12)
  29. arch/arc/kernel/process.c (+11 -9)
  30. arch/arc/kernel/smp.c (+15 -8)
  31. arch/arc/kernel/time.c (+11 -8)
  32. arch/arc/mm/dma.c (+26 -0)
  33. arch/arc/plat-eznps/smp.c (+0 -6)
  34. arch/arm/boot/dts/imx53-qsb.dts (+7 -7)
  35. arch/arm/boot/dts/logicpd-som-lv.dtsi (+5 -0)
  36. arch/arm/boot/dts/logicpd-torpedo-som.dtsi (+2 -2)
  37. arch/arm/boot/dts/omap5-board-common.dtsi (+4 -3)
  38. arch/arm/boot/dts/stih410-b2260.dts (+1 -1)
  39. arch/arm/boot/dts/sun8i-a23-a33.dtsi (+4 -0)
  40. arch/arm/include/asm/kvm_asm.h (+1 -0)
  41. arch/arm/include/asm/kvm_host.h (+3 -0)
  42. arch/arm/include/asm/kvm_hyp.h (+1 -0)
  43. arch/arm/kernel/traps.c (+20 -0)
  44. arch/arm/kernel/vmlinux-xip.lds.S (+5 -0)
  45. arch/arm/kvm/arm.c (+26 -1)
  46. arch/arm/kvm/hyp/tlb.c (+15 -0)
  47. arch/arm/lib/backtrace.S (+3 -34)
  48. arch/arm/mach-omap2/Kconfig (+1 -0)
  49. arch/arm/mach-omap2/id.c (+11 -5)
  50. arch/arm/mach-omap2/prm3xxx.c (+3 -0)
  51. arch/arm/mach-omap2/voltage.c (+6 -0)
  52. arch/arm/mm/dma-mapping.c (+1 -1)
  53. arch/arm/mm/proc-v7m.S (+1 -1)
  54. arch/arm64/boot/dts/marvell/armada-37xx.dtsi (+2 -2)
  55. arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi (+3 -3)
  56. arch/arm64/boot/dts/rockchip/rk3399.dtsi (+5 -2)
  57. arch/arm64/include/asm/alternative.h (+1 -1)
  58. arch/arm64/include/asm/cpucaps.h (+40 -0)
  59. arch/arm64/include/asm/cpufeature.h (+1 -19)
  60. arch/arm64/include/asm/kvm_asm.h (+1 -0)
  61. arch/arm64/include/asm/kvm_host.h (+3 -0)
  62. arch/arm64/include/asm/kvm_mmu.h (+1 -1)
  63. arch/arm64/include/asm/lse.h (+0 -1)
  64. arch/arm64/include/asm/perf_event.h (+9 -1)
  65. arch/arm64/kernel/perf_event.c (+1 -9)
  66. arch/arm64/kvm/hyp/tlb.c (+15 -0)
  67. arch/arm64/kvm/sys_regs.c (+8 -2)
  68. arch/mips/Makefile (+1 -1)
  69. arch/mips/boot/dts/mti/malta.dts (+2 -1)
  70. arch/mips/generic/init.c (+10 -6)
  71. arch/mips/include/asm/fpu_emulator.h (+13 -0)
  72. arch/mips/include/asm/kvm_host.h (+4 -3)
  73. arch/mips/include/asm/switch_to.h (+18 -0)
  74. arch/mips/kernel/mips-cpc.c (+10 -1)
  75. arch/mips/kernel/mips-r2-to-r6-emul.c (+5 -5)
  76. arch/mips/kernel/ptrace.c (+4 -4)
  77. arch/mips/kernel/r2300_fpu.S (+58 -80)
  78. arch/mips/kernel/r6000_fpu.S (+48 -41)
  79. arch/mips/kernel/relocate.c (+1 -1)
  80. arch/mips/kernel/setup.c (+13 -0)
  81. arch/mips/kernel/traps.c (+73 -64)
  82. arch/mips/kvm/emulate.c (+19 -13)
  83. arch/mips/kvm/mips.c (+4 -1)
  84. arch/mips/kvm/mmu.c (+0 -4)
  85. arch/mips/lib/dump_tlb.c (+22 -22)
  86. arch/mips/lib/r3k_dump_tlb.c (+9 -9)
  87. arch/nios2/kernel/time.c (+1 -0)
  88. arch/openrisc/include/asm/cache.h (+2 -0)
  89. arch/parisc/include/uapi/asm/unistd.h (+3 -1)
  90. arch/parisc/kernel/drivers.c (+3 -3)
  91. arch/parisc/kernel/syscall.S (+34 -32)
  92. arch/powerpc/include/asm/exception-64s.h (+12 -3)
  93. arch/powerpc/include/asm/ppc-opcode.h (+1 -0)
  94. arch/powerpc/kernel/exceptions-64s.S (+8 -3)
  95. arch/powerpc/kernel/process.c (+21 -21)
  96. arch/powerpc/kernel/setup_64.c (+14 -6)
  97. arch/powerpc/mm/hash_utils_64.c (+4 -0)
  98. arch/powerpc/mm/pgtable-radix.c (+4 -0)
  99. arch/powerpc/mm/tlb-radix.c (+4 -0)
  100. arch/s390/hypfs/hypfs_diag.c (+3 -3)

+ 2 - 2
Documentation/ABI/testing/sysfs-devices-system-ibm-rtl

@@ -1,4 +1,4 @@
-What:           state
+What:           /sys/devices/system/ibm_rtl/state
 Date:           Sep 2010
 Date:           Sep 2010
 KernelVersion:  2.6.37
 KernelVersion:  2.6.37
 Contact:        Vernon Mauery <vernux@us.ibm.com>
 Contact:        Vernon Mauery <vernux@us.ibm.com>
@@ -10,7 +10,7 @@ Description:    The state file allows a means by which to change in and
 Users:          The ibm-prtm userspace daemon uses this interface.
 Users:          The ibm-prtm userspace daemon uses this interface.
 
 
 
 
-What:           version
+What:           /sys/devices/system/ibm_rtl/version
 Date:           Sep 2010
 Date:           Sep 2010
 KernelVersion:  2.6.37
 KernelVersion:  2.6.37
 Contact:        Vernon Mauery <vernux@us.ibm.com>
 Contact:        Vernon Mauery <vernux@us.ibm.com>

+ 2 - 2
Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt → Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt

@@ -6,7 +6,7 @@ perform in-band IPMI communication with their host.
 
 
 Required properties:
 Required properties:
 
 
-- compatible : should be "aspeed,ast2400-bt-bmc"
+- compatible : should be "aspeed,ast2400-ibt-bmc"
 - reg: physical address and size of the registers
 - reg: physical address and size of the registers
 
 
 Optional properties:
 Optional properties:
@@ -17,7 +17,7 @@ Optional properties:
 Example:
 Example:
 
 
 	ibt@1e789140 {
 	ibt@1e789140 {
-		compatible = "aspeed,ast2400-bt-bmc";
+		compatible = "aspeed,ast2400-ibt-bmc";
 		reg = <0x1e789140 0x18>;
 		reg = <0x1e789140 0x18>;
 		interrupts = <8>;
 		interrupts = <8>;
 	};
 	};

+ 5 - 0
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt

@@ -43,6 +43,9 @@ Optional properties:
   reset signal present internally in some host controller IC designs.
   reset signal present internally in some host controller IC designs.
   See Documentation/devicetree/bindings/reset/reset.txt for details.
   See Documentation/devicetree/bindings/reset/reset.txt for details.
 
 
+* reset-names: request name for using "resets" property. Must be "reset".
+	(It will be used together with "resets" property.)
+
 * clocks: from common clock binding: handle to biu and ciu clocks for the
 * clocks: from common clock binding: handle to biu and ciu clocks for the
   bus interface unit clock and the card interface unit clock.
   bus interface unit clock and the card interface unit clock.
 
 
@@ -103,6 +106,8 @@ board specific portions as listed below.
 		interrupts = <0 75 0>;
 		interrupts = <0 75 0>;
 		#address-cells = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		#size-cells = <0>;
+		resets = <&rst 20>;
+		reset-names = "reset";
 	};
 	};
 
 
 [board specific internal DMA resources]
 [board specific internal DMA resources]

+ 8 - 3
Documentation/devicetree/bindings/pci/rockchip-pcie.txt

@@ -26,13 +26,16 @@ Required properties:
 	- "sys"
 	- "sys"
 	- "legacy"
 	- "legacy"
 	- "client"
 	- "client"
-- resets: Must contain five entries for each entry in reset-names.
+- resets: Must contain seven entries for each entry in reset-names.
 	   See ../reset/reset.txt for details.
 	   See ../reset/reset.txt for details.
 - reset-names: Must include the following names
 - reset-names: Must include the following names
 	- "core"
 	- "core"
 	- "mgmt"
 	- "mgmt"
 	- "mgmt-sticky"
 	- "mgmt-sticky"
 	- "pipe"
 	- "pipe"
+	- "pm"
+	- "aclk"
+	- "pclk"
 - pinctrl-names : The pin control state names
 - pinctrl-names : The pin control state names
 - pinctrl-0: The "default" pinctrl state
 - pinctrl-0: The "default" pinctrl state
 - #interrupt-cells: specifies the number of cells needed to encode an
 - #interrupt-cells: specifies the number of cells needed to encode an
@@ -86,8 +89,10 @@ pcie0: pcie@f8000000 {
 	reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
 	reg = <0x0 0xf8000000 0x0 0x2000000>, <0x0 0xfd000000 0x0 0x1000000>;
 	reg-names = "axi-base", "apb-base";
 	reg-names = "axi-base", "apb-base";
 	resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
 	resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-	reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+		 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> ,
+		 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+	reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+		      "pm", "pclk", "aclk";
 	phys = <&pcie_phy>;
 	phys = <&pcie_phy>;
 	phy-names = "pcie-phy";
 	phy-names = "pcie-phy";
 	pinctrl-names = "default";
 	pinctrl-names = "default";

+ 5 - 5
Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt

@@ -14,11 +14,6 @@ Required properies:
  - #size-cells	: The value of this property must be 1
  - #size-cells	: The value of this property must be 1
  - ranges	: defines mapping between pin controller node (parent) to
  - ranges	: defines mapping between pin controller node (parent) to
    gpio-bank node (children).
    gpio-bank node (children).
- - interrupt-parent: phandle of the interrupt parent to which the external
-   GPIO interrupts are forwarded to.
- - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
-   which includes IRQ mux selection register, and the offset of the IRQ mux
-   selection register.
  - pins-are-numbered: Specify the subnodes are using numbered pinmux to
  - pins-are-numbered: Specify the subnodes are using numbered pinmux to
    specify pins.
    specify pins.
 
 
@@ -37,6 +32,11 @@ Required properties:
 
 
 Optional properties:
 Optional properties:
  - reset:	  : Reference to the reset controller
  - reset:	  : Reference to the reset controller
+ - interrupt-parent: phandle of the interrupt parent to which the external
+   GPIO interrupts are forwarded to.
+ - st,syscfg: Should be phandle/offset pair. The phandle to the syscon node
+   which includes IRQ mux selection register, and the offset of the IRQ mux
+   selection register.
 
 
 Example:
 Example:
 #include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
 #include <dt-bindings/pinctrl/stm32f429-pinfunc.h>

+ 1 - 1
Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt

@@ -12,7 +12,7 @@ Required properties:
 
 
 Optional properties:
 Optional properties:
 - ti,dmic: phandle for the OMAP dmic node if the machine have it connected
 - ti,dmic: phandle for the OMAP dmic node if the machine have it connected
-- ti,jack_detection: Need to be present if the board capable to detect jack
+- ti,jack-detection: Need to be present if the board capable to detect jack
   insertion, removal.
   insertion, removal.
 
 
 Available audio endpoints for the audio-routing table:
 Available audio endpoints for the audio-routing table:

+ 0 - 1
Documentation/filesystems/Locking

@@ -447,7 +447,6 @@ prototypes:
 	int (*flush) (struct file *);
 	int (*flush) (struct file *);
 	int (*release) (struct inode *, struct file *);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
 	int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
 	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,

+ 0 - 1
Documentation/filesystems/vfs.txt

@@ -828,7 +828,6 @@ struct file_operations {
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
 	int (*release) (struct inode *, struct file *);
 	int (*fsync) (struct file *, loff_t, loff_t, int datasync);
 	int (*fsync) (struct file *, loff_t, loff_t, int datasync);
-	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
 	int (*lock) (struct file *, int, struct file_lock *);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);

+ 2 - 2
Documentation/i2c/i2c-topology

@@ -326,7 +326,7 @@ Two parent-locked sibling muxes
 
 
 This is a good topology.
 This is a good topology.
 
 
-                                   .--------.
+                                    .--------.
                    .----------.  .--| dev D1 |
                    .----------.  .--| dev D1 |
                    |  parent- |--'  '--------'
                    |  parent- |--'  '--------'
                 .--|  locked  |     .--------.
                 .--|  locked  |     .--------.
@@ -350,7 +350,7 @@ Mux-locked and parent-locked sibling muxes
 
 
 This is a good topology.
 This is a good topology.
 
 
-                                   .--------.
+                                    .--------.
                    .----------.  .--| dev D1 |
                    .----------.  .--| dev D1 |
                    |   mux-   |--'  '--------'
                    |   mux-   |--'  '--------'
                 .--|  locked  |     .--------.
                 .--|  locked  |     .--------.

+ 2 - 1
Documentation/networking/dsa/dsa.txt

@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and
 Switch tagging protocols
 Switch tagging protocols
 ------------------------
 ------------------------
 
 
-DSA currently supports 4 different tagging protocols, and a tag-less mode as
+DSA currently supports 5 different tagging protocols, and a tag-less mode as
 well. The different protocols are implemented in:
 well. The different protocols are implemented in:
 
 
 net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
 net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
 net/dsa/tag_dsa.c: Marvell's original DSA tag
 net/dsa/tag_dsa.c: Marvell's original DSA tag
 net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
 net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
 net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
 net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
+net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
 
 
 The exact format of the tag protocol is vendor specific, but in general, they
 The exact format of the tag protocol is vendor specific, but in general, they
 all contain something which:
 all contain something which:

+ 11 - 0
Documentation/virtual/kvm/api.txt

@@ -777,6 +777,17 @@ Gets the current timestamp of kvmclock as seen by the current guest. In
 conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
 conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
 such as migration.
 such as migration.
 
 
+When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the
+set of bits that KVM can return in struct kvm_clock_data's flag member.
+
+The only flag defined now is KVM_CLOCK_TSC_STABLE.  If set, the returned
+value is the exact kvmclock value seen by all VCPUs at the instant
+when KVM_GET_CLOCK was called.  If clear, the returned value is simply
+CLOCK_MONOTONIC plus a constant offset; the offset can be modified
+with KVM_SET_CLOCK.  KVM will try to make all VCPUs follow this clock,
+but the exact value read by each VCPU could differ, because the host
+TSC is not stable.
+
 struct kvm_clock_data {
 struct kvm_clock_data {
 	__u64 clock;  /* kvmclock current value */
 	__u64 clock;  /* kvmclock current value */
 	__u32 flags;
 	__u32 flags;
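
The KVM_GET_CLOCK behaviour documented above can be probed from userspace roughly as in the sketch below. This is not part of the patch; it assumes a VM file descriptor already obtained via /dev/kvm and KVM_CREATE_VM, and it only uses the interfaces named in the text (KVM_CHECK_EXTENSION with KVM_CAP_ADJUST_CLOCK, KVM_GET_CLOCK, and the KVM_CLOCK_TSC_STABLE flag).

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	/* Sketch: report the current kvmclock value and whether the
	 * KVM_CLOCK_TSC_STABLE flag is both supported and currently set. */
	static void show_kvmclock(int vm_fd)
	{
		struct kvm_clock_data data;
		int supported = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK);

		if (ioctl(vm_fd, KVM_GET_CLOCK, &data) < 0)
			return;

		printf("kvmclock: %llu, TSC-stable supported: %d, set: %d\n",
		       (unsigned long long)data.clock,
		       !!(supported & KVM_CLOCK_TSC_STABLE),
		       !!(data.flags & KVM_CLOCK_TSC_STABLE));
	}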

+ 11 - 1
Documentation/virtual/kvm/locking.txt

@@ -4,7 +4,17 @@ KVM Lock Overview
 1. Acquisition Orders
 1. Acquisition Orders
 ---------------------
 ---------------------
 
 
-(to be written)
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+  them together is quite rare.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock.  Everything
+else is a leaf: no other lock is taken inside the critical sections.
 
 
 2: Exception
 2: Exception
 ------------
 ------------
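
As a purely illustrative sketch of the mutex ordering documented above (not code from this commit; the helper name is hypothetical): kvm->lock is taken outside vcpu->mutex, and both are released in reverse order.

	#include <linux/kvm_host.h>

	/* Illustrative only: honour the documented order, kvm->lock outside
	 * vcpu->mutex, releasing in reverse. */
	static int example_vcpu_op(struct kvm *kvm, struct kvm_vcpu *vcpu)
	{
		int ret;

		mutex_lock(&kvm->lock);
		ret = mutex_lock_killable(&vcpu->mutex);
		if (!ret) {
			/* ... work on the vcpu with both mutexes held ... */
			mutex_unlock(&vcpu->mutex);
		}
		mutex_unlock(&kvm->lock);
		return ret;
	}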

+ 20 - 1
MAINTAINERS

@@ -7084,6 +7084,7 @@ F:	drivers/scsi/53c700*
 LED SUBSYSTEM
 LED SUBSYSTEM
 M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Jacek Anaszewski <j.anaszewski@samsung.com>
 M:	Jacek Anaszewski <j.anaszewski@samsung.com>
+M:	Pavel Machek <pavel@ucw.cz>
 L:	linux-leds@vger.kernel.org
 L:	linux-leds@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
 S:	Maintained
 S:	Maintained
@@ -7925,6 +7926,10 @@ F:	mm/
 MEMORY TECHNOLOGY DEVICES (MTD)
 MEMORY TECHNOLOGY DEVICES (MTD)
 M:	David Woodhouse <dwmw2@infradead.org>
 M:	David Woodhouse <dwmw2@infradead.org>
 M:	Brian Norris <computersforpeace@gmail.com>
 M:	Brian Norris <computersforpeace@gmail.com>
+M:	Boris Brezillon <boris.brezillon@free-electrons.com>
+M:	Marek Vasut <marek.vasut@gmail.com>
+M:	Richard Weinberger <richard@nod.at>
+M:	Cyrille Pitchen <cyrille.pitchen@atmel.com>
 L:	linux-mtd@lists.infradead.org
 L:	linux-mtd@lists.infradead.org
 W:	http://www.linux-mtd.infradead.org/
 W:	http://www.linux-mtd.infradead.org/
 Q:	http://patchwork.ozlabs.org/project/linux-mtd/list/
 Q:	http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -8053,6 +8058,7 @@ F:	drivers/infiniband/hw/mlx4/
 F:	include/linux/mlx4/
 F:	include/linux/mlx4/
 
 
 MELLANOX MLX5 core VPI driver
 MELLANOX MLX5 core VPI driver
+M:	Saeed Mahameed <saeedm@mellanox.com>
 M:	Matan Barak <matanb@mellanox.com>
 M:	Matan Barak <matanb@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 M:	Leon Romanovsky <leonro@mellanox.com>
 L:	netdev@vger.kernel.org
 L:	netdev@vger.kernel.org
@@ -9331,7 +9337,7 @@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
 M:	Keith Busch <keith.busch@intel.com>
 M:	Keith Busch <keith.busch@intel.com>
 L:	linux-pci@vger.kernel.org
 L:	linux-pci@vger.kernel.org
 S:	Supported
 S:	Supported
-F:	arch/x86/pci/vmd.c
+F:	drivers/pci/host/vmd.c
 
 
 PCIE DRIVER FOR ST SPEAR13XX
 PCIE DRIVER FOR ST SPEAR13XX
 M:	Pratyush Anand <pratyush.anand@gmail.com>
 M:	Pratyush Anand <pratyush.anand@gmail.com>
@@ -11404,6 +11410,17 @@ W:	http://www.st.com/spear
 S:	Maintained
 S:	Maintained
 F:	drivers/clk/spear/
 F:	drivers/clk/spear/
 
 
+SPI NOR SUBSYSTEM
+M:	Cyrille Pitchen <cyrille.pitchen@atmel.com>
+M:	Marek Vasut <marek.vasut@gmail.com>
+L:	linux-mtd@lists.infradead.org
+W:	http://www.linux-mtd.infradead.org/
+Q:	http://patchwork.ozlabs.org/project/linux-mtd/list/
+T:	git git://github.com/spi-nor/linux.git
+S:	Maintained
+F:	drivers/mtd/spi-nor/
+F:	include/linux/mtd/spi-nor.h
+
 SPI SUBSYSTEM
 SPI SUBSYSTEM
 M:	Mark Brown <broonie@kernel.org>
 M:	Mark Brown <broonie@kernel.org>
 L:	linux-spi@vger.kernel.org
 L:	linux-spi@vger.kernel.org
@@ -12783,6 +12800,7 @@ F:	include/uapi/linux/virtio_console.h
 
 
 VIRTIO CORE, NET AND BLOCK DRIVERS
 VIRTIO CORE, NET AND BLOCK DRIVERS
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 M:	"Michael S. Tsirkin" <mst@redhat.com>
+M:	Jason Wang <jasowang@redhat.com>
 L:	virtualization@lists.linux-foundation.org
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 S:	Maintained
 F:	Documentation/devicetree/bindings/virtio/
 F:	Documentation/devicetree/bindings/virtio/
@@ -12813,6 +12831,7 @@ F:	include/uapi/linux/virtio_gpu.h
 
 
 VIRTIO HOST (VHOST)
 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 M:	"Michael S. Tsirkin" <mst@redhat.com>
+M:	Jason Wang <jasowang@redhat.com>
 L:	kvm@vger.kernel.org
 L:	kvm@vger.kernel.org
 L:	virtualization@lists.linux-foundation.org
 L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
 L:	netdev@vger.kernel.org

+ 10 - 7
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 VERSION = 4
 PATCHLEVEL = 9
 PATCHLEVEL = 9
 SUBLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
 NAME = Psychotic Stoned Sheep
 NAME = Psychotic Stoned Sheep
 
 
 # *DOCUMENTATION*
 # *DOCUMENTATION*
@@ -370,7 +370,7 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL	=
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 LDFLAGS_vmlinux =
 LDFLAGS_vmlinux =
-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
 CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
 CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
 
 
 
 
@@ -399,11 +399,12 @@ KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		   -fno-strict-aliasing -fno-common \
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
 		   -Wno-format-security \
-		   -std=gnu89
+		   -std=gnu89 $(call cc-option,-fno-PIE)
+
 
 
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS   := -D__ASSEMBLY__
+KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
@@ -620,7 +621,6 @@ ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 include arch/$(SRCARCH)/Makefile
 
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
-KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning,frame-address,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning,frame-address,)
 
 
 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
@@ -629,15 +629,18 @@ KBUILD_CFLAGS	+= $(call cc-option,-fdata-sections,)
 endif
 endif
 
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= -Os
+KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
 else
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
 ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS	+= -O2
+KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,)
 else
 else
 KBUILD_CFLAGS   += -O2
 KBUILD_CFLAGS   += -O2
 endif
 endif
 endif
 endif
 
 
+KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
+			$(call cc-disable-warning,maybe-uninitialized,))
+
 # Tell gcc to never replace conditional load with a non-conditional one
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
 KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
 
 

+ 6 - 1
arch/arc/Makefile

@@ -50,6 +50,9 @@ atleast_gcc44 :=  $(call cc-ifversion, -ge, 0404, y)
 
 
 cflags-$(atleast_gcc44)			+= -fsection-anchors
 cflags-$(atleast_gcc44)			+= -fsection-anchors
 
 
+cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
+
 ifdef CONFIG_ISA_ARCV2
 ifdef CONFIG_ISA_ARCV2
 
 
 ifndef CONFIG_ARC_HAS_LL64
 ifndef CONFIG_ARC_HAS_LL64
@@ -68,7 +71,9 @@ cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables $(cfi)
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
 # Generic build system uses -O2, we want -O3
 # Note: No need to add to cflags-y as that happens anyways
 # Note: No need to add to cflags-y as that happens anyways
-ARCH_CFLAGS += -O3
+#
+# Disable the false maybe-uninitialized warings gcc spits out at -O3
+ARCH_CFLAGS += -O3 $(call cc-disable-warning,maybe-uninitialized,)
 endif
 endif
 
 
 # small data is default for elf32 tool-chain. If not usable, disable it
 # small data is default for elf32 tool-chain. If not usable, disable it

+ 1 - 1
arch/arc/boot/dts/axc001.dtsi

@@ -71,7 +71,7 @@
 			reg-io-width = <4>;
 			reg-io-width = <4>;
 		};
 		};
 
 
-		arcpmu0: pmu {
+		arcpct0: pct {
 			compatible = "snps,arc700-pct";
 			compatible = "snps,arc700-pct";
 		};
 		};
 	};
 	};

+ 1 - 1
arch/arc/boot/dts/nsim_700.dts

@@ -69,7 +69,7 @@
 			};
 			};
 		};
 		};
 
 
-		arcpmu0: pmu {
+		arcpct0: pct {
 			compatible = "snps,arc700-pct";
 			compatible = "snps,arc700-pct";
 		};
 		};
 	};
 	};

+ 4 - 0
arch/arc/boot/dts/nsimosci.dts

@@ -83,5 +83,9 @@
 			reg = <0xf0003000 0x44>;
 			reg = <0xf0003000 0x44>;
 			interrupts = <7>;
 			interrupts = <7>;
 		};
 		};
+
+		arcpct0: pct {
+			compatible = "snps,arc700-pct";
+		};
 	};
 	};
 };
 };

+ 1 - 0
arch/arc/configs/nsim_700_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsim_hs_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsim_hs_smp_defconfig

@@ -12,6 +12,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsimosci_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 0
arch/arc/configs/nsimosci_hs_defconfig

@@ -14,6 +14,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y

+ 1 - 2
arch/arc/configs/nsimosci_hs_smp_defconfig

@@ -10,6 +10,7 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_PID_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 CONFIG_MODULES=y
@@ -34,7 +35,6 @@ CONFIG_INET=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS=y
@@ -72,7 +72,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HWMON is not set
 # CONFIG_HWMON is not set
 CONFIG_DRM=y
 CONFIG_DRM=y
 CONFIG_DRM_ARCPGU=y
 CONFIG_DRM_ARCPGU=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set

+ 2 - 0
arch/arc/include/asm/arcregs.h

@@ -43,12 +43,14 @@
 #define STATUS_AE_BIT		5	/* Exception active */
 #define STATUS_AE_BIT		5	/* Exception active */
 #define STATUS_DE_BIT		6	/* PC is in delay slot */
 #define STATUS_DE_BIT		6	/* PC is in delay slot */
 #define STATUS_U_BIT		7	/* User/Kernel mode */
 #define STATUS_U_BIT		7	/* User/Kernel mode */
+#define STATUS_Z_BIT            11
 #define STATUS_L_BIT		12	/* Loop inhibit */
 #define STATUS_L_BIT		12	/* Loop inhibit */
 
 
 /* These masks correspond to the status word(STATUS_32) bits */
 /* These masks correspond to the status word(STATUS_32) bits */
 #define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
 #define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
 #define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
 #define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
 #define STATUS_U_MASK		(1<<STATUS_U_BIT)
 #define STATUS_U_MASK		(1<<STATUS_U_BIT)
+#define STATUS_Z_MASK		(1<<STATUS_Z_BIT)
 #define STATUS_L_MASK		(1<<STATUS_L_BIT)
 #define STATUS_L_MASK		(1<<STATUS_L_BIT)
 
 
 /*
 /*

+ 2 - 2
arch/arc/include/asm/smp.h

@@ -37,9 +37,9 @@ extern const char *arc_platform_smp_cpuinfo(void);
  * API expected BY platform smp code (FROM arch smp code)
  * API expected BY platform smp code (FROM arch smp code)
  *
  *
  * smp_ipi_irq_setup:
  * smp_ipi_irq_setup:
- *	Takes @cpu and @irq to which the arch-common ISR is hooked up
+ *	Takes @cpu and @hwirq to which the arch-common ISR is hooked up
  */
  */
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
 
 
 /*
 /*
  * struct plat_smp_ops	- SMP callbacks provided by platform to ARC SMP
  * struct plat_smp_ops	- SMP callbacks provided by platform to ARC SMP

+ 2 - 0
arch/arc/kernel/devtree.c

@@ -31,6 +31,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
 		arc_base_baud = 166666666;	/* Fixed 166.6MHz clk (TB10x) */
 		arc_base_baud = 166666666;	/* Fixed 166.6MHz clk (TB10x) */
 	else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
 	else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
 		arc_base_baud = 33333333;	/* Fixed 33MHz clk (AXS10x) */
 		arc_base_baud = 33333333;	/* Fixed 33MHz clk (AXS10x) */
+	else if (of_flat_dt_is_compatible(dt_root, "ezchip,arc-nps"))
+		arc_base_baud = 800000000;      /* Fixed 800MHz clk (NPS) */
 	else
 	else
 		arc_base_baud = 50000000;	/* Fixed default 50MHz */
 		arc_base_baud = 50000000;	/* Fixed default 50MHz */
 }
 }

+ 20 - 12
arch/arc/kernel/mcip.c

@@ -181,6 +181,8 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 {
 {
 	unsigned long flags;
 	unsigned long flags;
 	cpumask_t online;
 	cpumask_t online;
+	unsigned int destination_bits;
+	unsigned int distribution_mode;
 
 
 	/* errout if no online cpu per @cpumask */
 	/* errout if no online cpu per @cpumask */
 	if (!cpumask_and(&online, cpumask, cpu_online_mask))
 	if (!cpumask_and(&online, cpumask, cpu_online_mask))
@@ -188,8 +190,15 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 
 	raw_spin_lock_irqsave(&mcip_lock, flags);
 	raw_spin_lock_irqsave(&mcip_lock, flags);
 
 
-	idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
-	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+	destination_bits = cpumask_bits(&online)[0];
+	idu_set_dest(data->hwirq, destination_bits);
+
+	if (ffs(destination_bits) == fls(destination_bits))
+		distribution_mode = IDU_M_DISTRI_DEST;
+	else
+		distribution_mode = IDU_M_DISTRI_RR;
+
+	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
 
 
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 
 
@@ -207,16 +216,15 @@ static struct irq_chip idu_irq_chip = {
 
 
 };
 };
 
 
-static int idu_first_irq;
+static irq_hw_number_t idu_first_hwirq;
 
 
 static void idu_cascade_isr(struct irq_desc *desc)
 static void idu_cascade_isr(struct irq_desc *desc)
 {
 {
-	struct irq_domain *domain = irq_desc_get_handler_data(desc);
-	unsigned int core_irq = irq_desc_get_irq(desc);
-	unsigned int idu_irq;
+	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
+	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
 
-	idu_irq = core_irq - idu_first_irq;
-	generic_handle_irq(irq_find_mapping(domain, idu_irq));
+	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
 }
 }
 
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -282,7 +290,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 	struct irq_domain *domain;
 	struct irq_domain *domain;
 	/* Read IDU BCR to confirm nr_irqs */
 	/* Read IDU BCR to confirm nr_irqs */
 	int nr_irqs = of_irq_count(intc);
 	int nr_irqs = of_irq_count(intc);
-	int i, irq;
+	int i, virq;
 	struct mcip_bcr mp;
 	struct mcip_bcr mp;
 
 
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -303,11 +311,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 		 * however we need it to get the parent virq and set IDU handler
 		 * however we need it to get the parent virq and set IDU handler
 		 * as first level isr
 		 * as first level isr
 		 */
 		 */
-		irq = irq_of_parse_and_map(intc, i);
+		virq = irq_of_parse_and_map(intc, i);
 		if (!i)
 		if (!i)
-			idu_first_irq = irq;
+			idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
 
 
-		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
+		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
 	}
 	}
 
 
 	__mcip_cmd(CMD_IDU_ENABLE, 0);
 	__mcip_cmd(CMD_IDU_ENABLE, 0);

+ 11 - 9
arch/arc/kernel/process.c

@@ -43,8 +43,8 @@ SYSCALL_DEFINE0(arc_gettls)
 
 
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
 {
-	int uval;
-	int ret;
+	struct pt_regs *regs = current_pt_regs();
+	int uval = -EFAULT;
 
 
 	/*
 	/*
 	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
 	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -54,24 +54,26 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	 */
 	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
 
 
+	/* Z indicates to userspace if operation succeded */
+	regs->status32 &= ~STATUS_Z_MASK;
+
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 		return -EFAULT;
 
 
 	preempt_disable();
 	preempt_disable();
 
 
-	ret = __get_user(uval, uaddr);
-	if (ret)
+	if (__get_user(uval, uaddr))
 		goto done;
 		goto done;
 
 
-	if (uval != expected)
-		ret = -EAGAIN;
-	else
-		ret = __put_user(new, uaddr);
+	if (uval == expected) {
+		if (!__put_user(new, uaddr))
+			regs->status32 |= STATUS_Z_MASK;
+	}
 
 
 done:
 done:
 	preempt_enable();
 	preempt_enable();
 
 
-	return ret;
+	return uval;
 }
 }
 
 
 void arch_cpu_idle(void)
 void arch_cpu_idle(void)

+ 15 - 8
arch/arc/kernel/smp.c

@@ -22,6 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
 #include <linux/cpumask.h>
 #include <linux/reboot.h>
 #include <linux/reboot.h>
+#include <linux/irqdomain.h>
 #include <asm/processor.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/setup.h>
 #include <asm/mach_desc.h>
 #include <asm/mach_desc.h>
@@ -67,11 +68,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	int i;
 	int i;
 
 
 	/*
 	/*
-	 * Initialise the present map, which describes the set of CPUs
-	 * actually populated at the present time.
+	 * if platform didn't set the present map already, do it now
+	 * boot cpu is set to present already by init/main.c
 	 */
 	 */
-	for (i = 0; i < max_cpus; i++)
-		set_cpu_present(i, true);
+	if (num_present_cpus() <= 1) {
+		for (i = 0; i < max_cpus; i++)
+			set_cpu_present(i, true);
+	}
 }
 }
 
 
 void __init smp_cpus_done(unsigned int max_cpus)
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -351,20 +354,24 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  */
  */
 static DEFINE_PER_CPU(int, ipi_dev);
 static DEFINE_PER_CPU(int, ipi_dev);
 
 
-int smp_ipi_irq_setup(int cpu, int irq)
+int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
 {
 {
 	int *dev = per_cpu_ptr(&ipi_dev, cpu);
 	int *dev = per_cpu_ptr(&ipi_dev, cpu);
+	unsigned int virq = irq_find_mapping(NULL, hwirq);
+
+	if (!virq)
+		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
 
 
 	/* Boot cpu calls request, all call enable */
 	/* Boot cpu calls request, all call enable */
 	if (!cpu) {
 	if (!cpu) {
 		int rc;
 		int rc;
 
 
-		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
 		if (rc)
 		if (rc)
-			panic("Percpu IRQ request failed for %d\n", irq);
+			panic("Percpu IRQ request failed for %u\n", virq);
 	}
 	}
 
 
-	enable_percpu_irq(irq, 0);
+	enable_percpu_irq(virq, 0);
 
 
 	return 0;
 	return 0;
 }
 }

+ 11 - 8
arch/arc/kernel/time.c

@@ -152,14 +152,17 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
 		cycle_t  full;
 		cycle_t  full;
 	} stamp;
 	} stamp;
 
 
-
-	__asm__ __volatile(
-	"1:						\n"
-	"	lr		%0, [AUX_RTC_LOW]	\n"
-	"	lr		%1, [AUX_RTC_HIGH]	\n"
-	"	lr		%2, [AUX_RTC_CTRL]	\n"
-	"	bbit0.nt	%2, 31, 1b		\n"
-	: "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+	/*
+	 * hardware has an internal state machine which tracks readout of
+	 * low/high and updates the CTRL.status if
+	 *  - interrupt/exception taken between the two reads
+	 *  - high increments after low has been read
+	 */
+	do {
+		stamp.low = read_aux_reg(AUX_RTC_LOW);
+		stamp.high = read_aux_reg(AUX_RTC_HIGH);
+		status = read_aux_reg(AUX_RTC_CTRL);
+	} while (!(status & _BITUL(31)));
 
 
 	return stamp.full;
 	return stamp.full;
 }
 }

+ 26 - 0
arch/arc/mm/dma.c

@@ -105,6 +105,31 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 	__free_pages(page, get_order(size));
 }
 }
 
 
+static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			unsigned long attrs)
+{
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+	unsigned long off = vma->vm_pgoff;
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 /*
 /*
  * streaming DMA Mapping API...
  * streaming DMA Mapping API...
  * CPU accesses page via normal paddr, thus needs to explicitly made
  * CPU accesses page via normal paddr, thus needs to explicitly made
@@ -193,6 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
 struct dma_map_ops arc_dma_ops = {
 struct dma_map_ops arc_dma_ops = {
 	.alloc			= arc_dma_alloc,
 	.alloc			= arc_dma_alloc,
 	.free			= arc_dma_free,
 	.free			= arc_dma_free,
+	.mmap			= arc_dma_mmap,
 	.map_page		= arc_dma_map_page,
 	.map_page		= arc_dma_map_page,
 	.map_sg			= arc_dma_map_sg,
 	.map_sg			= arc_dma_map_sg,
 	.sync_single_for_device	= arc_dma_sync_single_for_device,
 	.sync_single_for_device	= arc_dma_sync_single_for_device,

+ 0 - 6
arch/arc/plat-eznps/smp.c

@@ -140,16 +140,10 @@ static void eznps_init_per_cpu(int cpu)
 	mtm_enable_core(cpu);
 	mtm_enable_core(cpu);
 }
 }
 
 
-static void eznps_ipi_clear(int irq)
-{
-	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
-}
-
 struct plat_smp_ops plat_smp_ops = {
 struct plat_smp_ops plat_smp_ops = {
 	.info		= smp_cpuinfo_buf,
 	.info		= smp_cpuinfo_buf,
 	.init_early_smp	= eznps_init_cpumasks,
 	.init_early_smp	= eznps_init_cpumasks,
 	.cpu_kick	= eznps_smp_wakeup_cpu,
 	.cpu_kick	= eznps_smp_wakeup_cpu,
 	.ipi_send	= eznps_ipi_send,
 	.ipi_send	= eznps_ipi_send,
 	.init_per_cpu	= eznps_init_per_cpu,
 	.init_per_cpu	= eznps_init_per_cpu,
-	.ipi_clear	= eznps_ipi_clear,
 };
 };

+ 7 - 7
arch/arm/boot/dts/imx53-qsb.dts

@@ -64,8 +64,8 @@
 			};
 			};
 
 
 			ldo3_reg: ldo3 {
 			ldo3_reg: ldo3 {
-				regulator-min-microvolt = <600000>;
-				regulator-max-microvolt = <1800000>;
+				regulator-min-microvolt = <1725000>;
+				regulator-max-microvolt = <3300000>;
 				regulator-always-on;
 				regulator-always-on;
 			};
 			};
 
 
@@ -76,8 +76,8 @@
 			};
 			};
 
 
 			ldo5_reg: ldo5 {
 			ldo5_reg: ldo5 {
-				regulator-min-microvolt = <1725000>;
-				regulator-max-microvolt = <3300000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 				regulator-always-on;
 			};
 			};
 
 
@@ -100,14 +100,14 @@
 			};
 			};
 
 
 			ldo9_reg: ldo9 {
 			ldo9_reg: ldo9 {
-				regulator-min-microvolt = <1200000>;
+				regulator-min-microvolt = <1250000>;
 				regulator-max-microvolt = <3600000>;
 				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 				regulator-always-on;
 			};
 			};
 
 
 			ldo10_reg: ldo10 {
 			ldo10_reg: ldo10 {
-				regulator-min-microvolt = <1250000>;
-				regulator-max-microvolt = <3650000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 				regulator-always-on;
 			};
 			};
 		};
 		};

+ 5 - 0
arch/arm/boot/dts/logicpd-som-lv.dtsi

@@ -13,6 +13,11 @@
 		};
 		};
 	};
 	};
 
 
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x80000000 0>;
+	};
+
 	wl12xx_vmmc: wl12xx_vmmc {
 	wl12xx_vmmc: wl12xx_vmmc {
 		compatible = "regulator-fixed";
 		compatible = "regulator-fixed";
 		regulator-name = "vwl1271";
 		regulator-name = "vwl1271";

+ 2 - 2
arch/arm/boot/dts/logicpd-torpedo-som.dtsi

@@ -13,9 +13,9 @@
 		};
 		};
 	};
 	};
 
 
-	memory@0 {
+	memory@80000000 {
 		device_type = "memory";
 		device_type = "memory";
-		reg = <0 0>;
+		reg = <0x80000000 0>;
 	};
 	};
 
 
 	leds {
 	leds {

+ 4 - 3
arch/arm/boot/dts/omap5-board-common.dtsi

@@ -124,6 +124,7 @@
 		compatible = "ti,abe-twl6040";
 		compatible = "ti,abe-twl6040";
 		ti,model = "omap5-uevm";
 		ti,model = "omap5-uevm";
 
 
+		ti,jack-detection;
 		ti,mclk-freq = <19200000>;
 		ti,mclk-freq = <19200000>;
 
 
 		ti,mcpdm = <&mcpdm>;
 		ti,mcpdm = <&mcpdm>;
@@ -415,7 +416,7 @@
 			ti,backup-battery-charge-high-current;
 			ti,backup-battery-charge-high-current;
 		};
 		};
 
 
-		gpadc {
+		gpadc: gpadc {
 			compatible = "ti,palmas-gpadc";
 			compatible = "ti,palmas-gpadc";
 			interrupts = <18 0
 			interrupts = <18 0
 				      16 0
 				      16 0
@@ -475,8 +476,8 @@
 				smps6_reg: smps6 {
 				smps6_reg: smps6 {
 					/* VDD_DDR3 - over VDD_SMPS6 */
 					/* VDD_DDR3 - over VDD_SMPS6 */
 					regulator-name = "smps6";
 					regulator-name = "smps6";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
+					regulator-min-microvolt = <1350000>;
+					regulator-max-microvolt = <1350000>;
 					regulator-always-on;
 					regulator-always-on;
 					regulator-boot-on;
 					regulator-boot-on;
 				};
 				};

+ 1 - 1
arch/arm/boot/dts/stih410-b2260.dts

@@ -74,7 +74,7 @@
 		/* Low speed expansion connector */
 		/* Low speed expansion connector */
 		spi0: spi@9844000 {
 		spi0: spi@9844000 {
 			label = "LS-SPI0";
 			label = "LS-SPI0";
-			cs-gpio = <&pio30 3 0>;
+			cs-gpios = <&pio30 3 0>;
 			status = "okay";
 			status = "okay";
 		};
 		};
 
 

+ 4 - 0
arch/arm/boot/dts/sun8i-a23-a33.dtsi

@@ -282,11 +282,15 @@
 			uart1_pins_a: uart1@0 {
 			uart1_pins_a: uart1@0 {
 				allwinner,pins = "PG6", "PG7";
 				allwinner,pins = "PG6", "PG7";
 				allwinner,function = "uart1";
 				allwinner,function = "uart1";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 			};
 
 
 			uart1_pins_cts_rts_a: uart1-cts-rts@0 {
 			uart1_pins_cts_rts_a: uart1-cts-rts@0 {
 				allwinner,pins = "PG8", "PG9";
 				allwinner,pins = "PG8", "PG9";
 				allwinner,function = "uart1";
 				allwinner,function = "uart1";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 			};
 
 
 			mmc0_pins_a: mmc0@0 {
 			mmc0_pins_a: mmc0@0 {

+ 1 - 0
arch/arm/include/asm/kvm_asm.h

@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 

+ 3 - 0
arch/arm/include/asm/kvm_host.h

@@ -57,6 +57,9 @@ struct kvm_arch {
 	/* VTTBR value associated with below pgd and vmid */
 	/* VTTBR value associated with below pgd and vmid */
 	u64    vttbr;
 	u64    vttbr;
 
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* Timer */
 	/* Timer */
 	struct arch_timer_kvm	timer;
 	struct arch_timer_kvm	timer;
 
 

+ 1 - 0
arch/arm/include/asm/kvm_hyp.h

@@ -71,6 +71,7 @@
 #define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
 #define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
 #define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
 #define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
 #define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
 #define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL		__ACCESS_CP15(c8, 0, c7, 0)
 #define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
 #define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
 #define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
 #define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
 #define NMRR		__ACCESS_CP15(c10, 0, c2, 1)
 #define NMRR		__ACCESS_CP15(c10, 0, c2, 1)

+ 20 - 0
arch/arm/kernel/traps.c

@@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
 		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
 		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
 }
 }
 
 
+void dump_backtrace_stm(u32 *stack, u32 instruction)
+{
+	char str[80], *p;
+	unsigned int x;
+	int reg;
+
+	for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
+		if (instruction & BIT(reg)) {
+			p += sprintf(p, " r%d:%08x", reg, *stack--);
+			if (++x == 6) {
+				x = 0;
+				p = str;
+				printk("%s\n", str);
+			}
+		}
+	}
+	if (p != str)
+		printk("%s\n", str);
+}
+
 #ifndef CONFIG_ARM_UNWIND
 #ifndef CONFIG_ARM_UNWIND
 /*
 /*
  * Stack pointers should always be within the kernels view of
  * Stack pointers should always be within the kernels view of

+ 5 - 0
arch/arm/kernel/vmlinux-xip.lds.S

@@ -3,6 +3,9 @@
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
  */
  */
 
 
+/* No __ro_after_init data in the .rodata section - which will always be ro */
+#define RO_AFTER_INIT_DATA
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/thread_info.h>
@@ -223,6 +226,8 @@ SECTIONS
 		. = ALIGN(PAGE_SIZE);
 		. = ALIGN(PAGE_SIZE);
 		__init_end = .;
 		__init_end = .;
 
 
+		*(.data..ro_after_init)
+
 		NOSAVE_DATA
 		NOSAVE_DATA
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)

+ 26 - 1
arch/arm/kvm/arm.c

@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
  */
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 {
-	int ret = 0;
+	int ret, cpu;
 
 
 	if (type)
 	if (type)
 		return -EINVAL;
 		return -EINVAL;
 
 
+	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+	if (!kvm->arch.last_vcpu_ran)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
 	ret = kvm_alloc_stage2_pgd(kvm);
 	ret = kvm_alloc_stage2_pgd(kvm);
 	if (ret)
 	if (ret)
 		goto out_fail_alloc;
 		goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 out_free_stage2_pgd:
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(kvm);
 	kvm_free_stage2_pgd(kvm);
 out_fail_alloc:
 out_fail_alloc:
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
 	return ret;
 	return ret;
 }
 }
 
 
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 {
 	int i;
 	int i;
 
 
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 {
+	int *last_ran;
+
+	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+	/*
+	 * We might get preempted before the vCPU actually runs, but
+	 * over-invalidation doesn't affect correctness.
+	 */
+	if (*last_ran != vcpu->vcpu_id) {
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		*last_ran = vcpu->vcpu_id;
+	}
+
 	vcpu->cpu = cpu;
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
 

+ 15 - 0
arch/arm/kvm/hyp/tlb.c

@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	__kvm_tlb_flush_vmid(kvm);
 	__kvm_tlb_flush_vmid(kvm);
 }
 }
 
 
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, VTTBR);
+	isb();
+
+	write_sysreg(0, TLBIALL);
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, VTTBR);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 void __hyp_text __kvm_flush_vm_context(void)
 {
 {
 	write_sysreg(0, TLBIALLNSNHIS);
 	write_sysreg(0, TLBIALLNSNHIS);

+ 3 - 34
arch/arm/lib/backtrace.S

@@ -10,6 +10,7 @@
  * 27/03/03 Ian Molton Clean up CONFIG_CPU
  * 27/03/03 Ian Molton Clean up CONFIG_CPU
  *
  *
  */
  */
+#include <linux/kern_levels.h>
 #include <linux/linkage.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/assembler.h>
 		.text
 		.text
@@ -83,13 +84,13 @@ for_each_frame:	tst	frame, mask		@ Check for address exceptions
 		teq	r3, r1, lsr #11
 		teq	r3, r1, lsr #11
 		ldreq	r0, [frame, #-8]	@ get sp
 		ldreq	r0, [frame, #-8]	@ get sp
 		subeq	r0, r0, #4		@ point at the last arg
 		subeq	r0, r0, #4		@ point at the last arg
-		bleq	.Ldumpstm		@ dump saved registers
+		bleq	dump_backtrace_stm	@ dump saved registers
 
 
 1004:		ldr	r1, [sv_pc, #0]		@ if stmfd sp!, {..., fp, ip, lr, pc}
 1004:		ldr	r1, [sv_pc, #0]		@ if stmfd sp!, {..., fp, ip, lr, pc}
 		ldr	r3, .Ldsi		@ instruction exists,
 		ldr	r3, .Ldsi		@ instruction exists,
 		teq	r3, r1, lsr #11
 		teq	r3, r1, lsr #11
 		subeq	r0, frame, #16
 		subeq	r0, frame, #16
-		bleq	.Ldumpstm		@ dump saved registers
+		bleq	dump_backtrace_stm	@ dump saved registers
 
 
 		teq	sv_fp, #0		@ zero saved fp means
 		teq	sv_fp, #0		@ zero saved fp means
 		beq	no_frame		@ no further frames
 		beq	no_frame		@ no further frames
@@ -112,38 +113,6 @@ ENDPROC(c_backtrace)
 		.long	1004b, 1006b
 		.long	1004b, 1006b
 		.popsection
 		.popsection
 
 
-#define instr r4
-#define reg   r5
-#define stack r6
-
-.Ldumpstm:	stmfd	sp!, {instr, reg, stack, r7, lr}
-		mov	stack, r0
-		mov	instr, r1
-		mov	reg, #10
-		mov	r7, #0
-1:		mov	r3, #1
- ARM(		tst	instr, r3, lsl reg	)
- THUMB(		lsl	r3, reg			)
- THUMB(		tst	instr, r3		)
-		beq	2f
-		add	r7, r7, #1
-		teq	r7, #6
-		moveq	r7, #0
-		adr	r3, .Lcr
-		addne	r3, r3, #1		@ skip newline
-		ldr	r2, [stack], #-4
-		mov	r1, reg
-		adr	r0, .Lfp
-		bl	printk
-2:		subs	reg, reg, #1
-		bpl	1b
-		teq	r7, #0
-		adrne	r0, .Lcr
-		blne	printk
-		ldmfd	sp!, {instr, reg, stack, r7, pc}
-
-.Lfp:		.asciz	" r%d:%08x%s"
-.Lcr:		.asciz	"\n"
 .Lbad:		.asciz	"Backtrace aborted due to bad frame pointer <%p>\n"
 .Lbad:		.asciz	"Backtrace aborted due to bad frame pointer <%p>\n"
 		.align
 		.align
 .Ldsi:		.word	0xe92dd800 >> 11	@ stmfd sp!, {... fp, ip, lr, pc}
 .Ldsi:		.word	0xe92dd800 >> 11	@ stmfd sp!, {... fp, ip, lr, pc}

+ 1 - 0
arch/arm/mach-omap2/Kconfig

@@ -71,6 +71,7 @@ config SOC_AM43XX
 	select HAVE_ARM_TWD
 	select HAVE_ARM_TWD
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_775420
 	select ARM_ERRATA_775420
+	select OMAP_INTERCONNECT
 
 
 config SOC_DRA7XX
 config SOC_DRA7XX
 	bool "TI DRA7XX"
 	bool "TI DRA7XX"

+ 11 - 5
arch/arm/mach-omap2/id.c

@@ -205,11 +205,15 @@ void __init omap2xxx_check_revision(void)
 
 
 #define OMAP3_SHOW_FEATURE(feat)		\
 #define OMAP3_SHOW_FEATURE(feat)		\
 	if (omap3_has_ ##feat())		\
 	if (omap3_has_ ##feat())		\
-		printk(#feat" ");
+		n += scnprintf(buf + n, sizeof(buf) - n, #feat " ");
 
 
 static void __init omap3_cpuinfo(void)
 static void __init omap3_cpuinfo(void)
 {
 {
 	const char *cpu_name;
 	const char *cpu_name;
+	char buf[64];
+	int n = 0;
+
+	memset(buf, 0, sizeof(buf));
 
 
 	/*
 	/*
 	 * OMAP3430 and OMAP3530 are assumed to be same.
 	 * OMAP3430 and OMAP3530 are assumed to be same.
@@ -241,10 +245,10 @@ static void __init omap3_cpuinfo(void)
 		cpu_name = "OMAP3503";
 		cpu_name = "OMAP3503";
 	}
 	}
 
 
-	sprintf(soc_name, "%s", cpu_name);
+	scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name);
 
 
 	/* Print verbose information */
 	/* Print verbose information */
-	pr_info("%s %s (", soc_name, soc_rev);
+	n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev);
 
 
 	OMAP3_SHOW_FEATURE(l2cache);
 	OMAP3_SHOW_FEATURE(l2cache);
 	OMAP3_SHOW_FEATURE(iva);
 	OMAP3_SHOW_FEATURE(iva);
@@ -252,8 +256,10 @@ static void __init omap3_cpuinfo(void)
 	OMAP3_SHOW_FEATURE(neon);
 	OMAP3_SHOW_FEATURE(neon);
 	OMAP3_SHOW_FEATURE(isp);
 	OMAP3_SHOW_FEATURE(isp);
 	OMAP3_SHOW_FEATURE(192mhz_clk);
 	OMAP3_SHOW_FEATURE(192mhz_clk);
-
-	printk(")\n");
+	if (*(buf + n - 1) == ' ')
+		n--;
+	n += scnprintf(buf + n, sizeof(buf) - n, ")\n");
+	pr_info("%s", buf);
 }
 }
 
 
 #define OMAP3_CHECK_FEATURE(status,feat)				\
 #define OMAP3_CHECK_FEATURE(status,feat)				\

+ 3 - 0
arch/arm/mach-omap2/prm3xxx.c

@@ -319,6 +319,9 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
 	if (has_uart4) {
 	if (has_uart4) {
 		en_uart4_mask = OMAP3630_EN_UART4_MASK;
 		en_uart4_mask = OMAP3630_EN_UART4_MASK;
 		grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK;
 		grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK;
+	} else {
+		en_uart4_mask = 0;
+		grpsel_uart4_mask = 0;
 	}
 	}
 
 
 	/* Enable wakeups in PER */
 	/* Enable wakeups in PER */

+ 6 - 0
arch/arm/mach-omap2/voltage.c

@@ -87,6 +87,12 @@ int voltdm_scale(struct voltagedomain *voltdm,
 		return -ENODATA;
 	}

+	if (!voltdm->volt_data) {
+		pr_err("%s: No voltage data defined for vdd_%s\n",
+			__func__, voltdm->name);
+		return -ENODATA;
+	}
+
 	/* Adjust voltage to the exact voltage from the OPP table */
 	for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
 		if (voltdm->volt_data[i].volt_nominal >= target_volt) {

+ 1 - 1
arch/arm/mm/dma-mapping.c

@@ -1167,7 +1167,7 @@ static int __init dma_debug_do_init(void)
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }
-fs_initcall(dma_debug_do_init);
+core_initcall(dma_debug_do_init);
 
 #ifdef CONFIG_ARM_DMA_USE_IOMMU


+ 1 - 1
arch/arm/mm/proc-v7m.S

@@ -96,7 +96,7 @@ ENTRY(cpu_cm7_proc_fin)
 	ret	lr
 ENDPROC(cpu_cm7_proc_fin)

-	.section ".text.init", #alloc, #execinstr
+	.section ".init.text", #alloc, #execinstr
 
 __v7m_cm7_setup:
 	mov	r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)

+ 2 - 2
arch/arm64/boot/dts/marvell/armada-37xx.dtsi

@@ -105,7 +105,7 @@
 				status = "disabled";
 			};

-			nb_perih_clk: nb-periph-clk@13000{
+			nb_periph_clk: nb-periph-clk@13000 {
 				compatible = "marvell,armada-3700-periph-clock-nb";
 				reg = <0x13000 0x100>;
 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
@@ -113,7 +113,7 @@
 				#clock-cells = <1>;
 			};

-			sb_perih_clk: sb-periph-clk@18000{
+			sb_periph_clk: sb-periph-clk@18000 {
 				compatible = "marvell,armada-3700-periph-clock-sb";
 				reg = <0x18000 0x100>;
 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,

+ 3 - 3
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi

@@ -130,8 +130,8 @@
 				reg = <0x700600 0x50>;
 				#address-cells = <0x1>;
 				#size-cells = <0x0>;
-				cell-index = <1>;
-				clocks = <&cps_syscon0 0 3>;
+				cell-index = <3>;
+				clocks = <&cps_syscon0 1 21>;
 				status = "disabled";
 			};

@@ -140,7 +140,7 @@
 				reg = <0x700680 0x50>;
 				#address-cells = <1>;
 				#size-cells = <0>;
-				cell-index = <2>;
+				cell-index = <4>;
 				clocks = <&cps_syscon0 1 21>;
 				status = "disabled";
 			};

+ 5 - 2
arch/arm64/boot/dts/rockchip/rk3399.dtsi

@@ -300,8 +300,11 @@
 		ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
 			  0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
 		resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
-			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>;
-		reset-names = "core", "mgmt", "mgmt-sticky", "pipe";
+			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+			 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+			 <&cru SRST_A_PCIE>;
+		reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+			      "pm", "pclk", "aclk";
 		status = "disabled";

 		pcie0_intc: interrupt-controller {

+ 1 - 1
arch/arm64/include/asm/alternative.h

@@ -1,7 +1,7 @@
 #ifndef __ASM_ALTERNATIVE_H
 #define __ASM_ALTERNATIVE_H

-#include <asm/cpufeature.h>
+#include <asm/cpucaps.h>
 #include <asm/insn.h>

 #ifndef __ASSEMBLY__

+ 40 - 0
arch/arm64/include/asm/cpucaps.h

@@ -0,0 +1,40 @@
+/*
+ * arch/arm64/include/asm/cpucaps.h
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#define ARM64_WORKAROUND_CLEAN_CACHE		0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
+#define ARM64_WORKAROUND_845719			2
+#define ARM64_HAS_SYSREG_GIC_CPUIF		3
+#define ARM64_HAS_PAN				4
+#define ARM64_HAS_LSE_ATOMICS			5
+#define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
+#define ARM64_HAS_NO_HW_PREFETCH		8
+#define ARM64_HAS_UAO				9
+#define ARM64_ALT_PAN_NOT_UAO			10
+#define ARM64_HAS_VIRT_HOST_EXTN		11
+#define ARM64_WORKAROUND_CAVIUM_27456		12
+#define ARM64_HAS_32BIT_EL0			13
+#define ARM64_HYP_OFFSET_LOW			14
+#define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
+
+#define ARM64_NCAPS				16
+
+#endif /* __ASM_CPUCAPS_H */

+ 1 - 19
arch/arm64/include/asm/cpufeature.h

@@ -11,6 +11,7 @@
 
 #include <linux/jump_label.h>

+#include <asm/cpucaps.h>
 #include <asm/hwcap.h>
 #include <asm/sysreg.h>

@@ -24,25 +25,6 @@
 #define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
 #define cpu_feature(x)		ilog2(HWCAP_ ## x)

-#define ARM64_WORKAROUND_CLEAN_CACHE		0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
-#define ARM64_WORKAROUND_845719			2
-#define ARM64_HAS_SYSREG_GIC_CPUIF		3
-#define ARM64_HAS_PAN				4
-#define ARM64_HAS_LSE_ATOMICS			5
-#define ARM64_WORKAROUND_CAVIUM_23154		6
-#define ARM64_WORKAROUND_834220			7
-#define ARM64_HAS_NO_HW_PREFETCH		8
-#define ARM64_HAS_UAO				9
-#define ARM64_ALT_PAN_NOT_UAO			10
-#define ARM64_HAS_VIRT_HOST_EXTN		11
-#define ARM64_WORKAROUND_CAVIUM_27456		12
-#define ARM64_HAS_32BIT_EL0			13
-#define ARM64_HYP_OFFSET_LOW			14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
-
-#define ARM64_NCAPS				16
-
 #ifndef __ASSEMBLY__

 #include <linux/kernel.h>

+ 1 - 0
arch/arm64/include/asm/kvm_asm.h

@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);


+ 3 - 0
arch/arm64/include/asm/kvm_host.h

@@ -62,6 +62,9 @@ struct kvm_arch {
 	/* VTTBR value associated with above pgd and vmid */
 	u64    vttbr;

+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* The maximum number of vCPUs depends on the used GIC model */
 	int max_vcpus;


+ 1 - 1
arch/arm64/include/asm/kvm_mmu.h

@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 	return v;
 }

-#define kern_hyp_va(v) 	(typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
 /*
  * We currently only support a 40bit IPA.

+ 0 - 1
arch/arm64/include/asm/lse.h

@@ -5,7 +5,6 @@
 
 #include <linux/stringify.h>
 #include <asm/alternative.h>
-#include <asm/cpufeature.h>
 
 #ifdef __ASSEMBLER__


+ 9 - 1
arch/arm64/include/asm/perf_event.h

@@ -46,7 +46,15 @@
 #define	ARMV8_PMU_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
 #define	ARMV8_PMU_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */

-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR	0	/* Software increment event */
+/*
+ * PMUv3 event types: required events
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR				0x00
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL			0x03
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE				0x04
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED				0x10
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES				0x11
+#define ARMV8_PMUV3_PERFCTR_BR_PRED				0x12
 
 /*
  * Event filters for PMUv3

+ 1 - 9
arch/arm64/kernel/perf_event.c

@@ -31,17 +31,9 @@
 
 /*
  * ARMv8 PMUv3 Performance Events handling code.
- * Common event types.
+ * Common event types (some are defined in asm/perf_event.h).
  */
-/* Required events. */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR				0x00
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL			0x03
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE				0x04
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED				0x10
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES				0x11
-#define ARMV8_PMUV3_PERFCTR_BR_PRED				0x12
-
 /* At least one of the following is required. */
 #define ARMV8_PMUV3_PERFCTR_INST_RETIRED			0x08
 #define ARMV8_PMUV3_PERFCTR_INST_SPEC				0x1B

+ 15 - 0
arch/arm64/kvm/hyp/tlb.c

@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	write_sysreg(0, vttbr_el2);
 }

+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+
+	asm volatile("tlbi vmalle1" : : );
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, vttbr_el2);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	dsb(ishst);

+ 8 - 2
arch/arm64/kvm/sys_regs.c

@@ -597,8 +597,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 
 			idx = ARMV8_PMU_CYCLE_IDX;
 		} else {
-			BUG();
+			return false;
 		}
+	} else if (r->CRn == 0 && r->CRm == 9) {
+		/* PMCCNTR */
+		if (pmu_access_event_counter_el0_disabled(vcpu))
+			return false;
+
+		idx = ARMV8_PMU_CYCLE_IDX;
 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
 		/* PMEVCNTRn_EL0 */
 		if (pmu_access_event_counter_el0_disabled(vcpu))
@@ -606,7 +612,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 	} else {
-		BUG();
+		return false;
 	}

 	if (!pmu_counter_idx_valid(vcpu, idx))

+ 1 - 1
arch/mips/Makefile

@@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
 
 bootvars-y	= VMLINUX_LOAD_ADDRESS=$(load-y) \
 		  VMLINUX_ENTRY_ADDRESS=$(entry-y) \
-		  PLATFORM=$(platform-y)
+		  PLATFORM="$(platform-y)"
 ifdef CONFIG_32BIT
 bootvars-y	+= ADDR_BITS=32
 endif

+ 2 - 1
arch/mips/boot/dts/mti/malta.dts

@@ -84,12 +84,13 @@
 	fpga_regs: system-controller@1f000000 {
 		compatible = "mti,malta-fpga", "syscon", "simple-mfd";
 		reg = <0x1f000000 0x1000>;
+		native-endian;
 
 		reboot {
 			compatible = "syscon-reboot";
 			regmap = <&fpga_regs>;
 			offset = <0x500>;
-			mask = <0x4d>;
+			mask = <0x42>;
 		};
 	};


+ 10 - 6
arch/mips/generic/init.c

@@ -29,10 +29,20 @@ static __initdata const struct mips_machine *mach;
 static __initdata const void *mach_match_data;

 void __init prom_init(void)
+{
+	plat_get_fdt();
+	BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
 {
 	const struct mips_machine *check_mach;
 	const struct of_device_id *match;

+	if (fdt)
+		/* Already set up */
+		return (void *)fdt;
+
 	if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
 		/*
 		 * We booted using the UHI boot protocol, so we have been
@@ -75,12 +85,6 @@ void __init prom_init(void)
 		/* Retrieve the machine's FDT */
 		fdt = mach->fdt;
 	}
-
-	BUG_ON(!fdt);
-}
-
-void __init *plat_get_fdt(void)
-{
 	return (void *)fdt;
 }


+ 13 - 0
arch/mips/include/asm/fpu_emulator.h

@@ -63,6 +63,8 @@ do {									\
 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
 				    struct mips_fpu_struct *ctx, int has_fpu,
 				    void *__user *fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+		     struct task_struct *tsk);
 int process_fpemu_return(int sig, void __user *fault_addr,
 			 unsigned long fcr31);
 int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
 		set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
 }

+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+	return fcr31 & (FPU_CSR_UNI_X |
+			((fcr31 & FPU_CSR_ALL_E) <<
+			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
+
 #endif /* _ASM_FPU_EMULATOR_H */

+ 4 - 3
arch/mips/include/asm/kvm_host.h

@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
 	/* Host KSEG0 address of the EI/DI offset */
 	void *kseg0_commpage;

-	u32 io_gpr;		/* GPR used as IO source/target */
+	/* Resume PC after MMIO completion */
+	unsigned long io_pc;
+	/* GPR used as IO source/target */
+	u32 io_gpr;
 
 	struct hrtimer comparecount_timer;
 	/* Count timer control KVM register */
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
 	/* Bitmask of pending exceptions to be cleared */
 	unsigned long pending_exceptions_clr;

-	u32 pending_load_cause;
-
 	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
 	unsigned long preempt_entryhi;


+ 18 - 0
arch/mips/include/asm/switch_to.h

@@ -75,6 +75,22 @@ do {	if (cpu_has_rw_llb) {						\
 	}								\
 } while (0)

+/*
+ * Check FCSR for any unmasked exceptions pending set with `ptrace',
+ * clear them and send a signal.
+ */
+#define __sanitize_fcr31(next)						\
+do {									\
+	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31);	\
+	void __user *pc;						\
+									\
+	if (unlikely(fcr31)) {						\
+		pc = (void __user *)task_pt_regs(next)->cp0_epc;	\
+		next->thread.fpu.fcr31 &= ~fcr31;			\
+		force_fcr31_sig(fcr31, pc, next);			\
+	}								\
+} while (0)
+
 /*
  * For newly created kernel threads switch_to() will return to
  * ret_from_kernel_thread, newly created user threads to ret_from_fork.
@@ -85,6 +101,8 @@ do {	if (cpu_has_rw_llb) {						\
 do {									\
 	__mips_mt_fpaff_switch_to(prev);				\
 	lose_fpu_inatomic(1, prev);					\
+	if (tsk_used_math(next))					\
+		__sanitize_fcr31(next);					\
 	if (cpu_has_dsp) {						\
 		__save_dsp(prev);					\
 		__restore_dsp(next);					\

+ 10 - 1
arch/mips/kernel/mips-cpc.c

@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
 
 static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);

+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+	return 0;
+}
+
 /**
  * mips_cpc_phys_base - retrieve the physical base address of the CPC
  *
@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
 	if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
 		return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;

-	/* Otherwise, give it the default address & enable it */
+	/* Otherwise, use the default address */
 	cpc_base = mips_cpc_default_phys_base();
+	if (!cpc_base)
+		return cpc_base;
+
+	/* Enable the CPC, mapped at the default address */
 	write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
 	return cpc_base;
 }

+ 5 - 5
arch/mips/kernel/mips-r2-to-r6-emul.c

@@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
  * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
  * @regs: Process register set
  * @inst: Instruction to decode and emulate
- * @fcr31: Floating Point Control and Status Register returned
+ * @fcr31: Floating Point Control and Status Register Cause bits returned
  */
 int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
 {
@@ -1172,13 +1172,13 @@ fpu_emul:
 
 		err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
 					       &fault_addr);
-		*fcr31 = current->thread.fpu.fcr31;
 
 		/*
-		 * We can't allow the emulated instruction to leave any of
-		 * the cause bits set in $fcr31.
+		 * We can't allow the emulated instruction to leave any
+		 * enabled Cause bits set in $fcr31.
 		 */
 		 */
-		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+		*fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+		current->thread.fpu.fcr31 &= ~res;
 
 		/*
 		 * this is a tricky issue - lose_fpu() uses LL/SC atomics

+ 4 - 4
arch/mips/kernel/ptrace.c

@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
 }

 /*
- * Poke at FCSR according to its mask.  Don't set the cause bits as
- * this is currently not handled correctly in FP context restoration
- * and will cause an oops if a corresponding enable bit is set.
+ * Poke at FCSR according to its mask.  Set the Cause bits even
+ * if a corresponding Enable bit is set.  This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
  */
 static void ptrace_setfcr31(struct task_struct *child, u32 value)
 {
 	u32 fcr31;
 	u32 mask;

-	value &= ~FPU_CSR_ALL_X;
 	fcr31 = child->thread.fpu.fcr31;
 	mask = boot_cpu_data.fpu_msk31;
 	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
 			break;
 #endif
 		case FPC_CSR:
+			init_fp_ctx(child);
 			ptrace_setfcr31(child, data);
 			break;
 		case DSP_BASE ... DSP_BASE + 5: {

+ 58 - 80
arch/mips/kernel/r2300_fpu.S

@@ -19,108 +19,86 @@
 #include <asm/regdef.h>

 #define EX(a,b)							\
+9:	a,##b;							\
+	.section __ex_table,"a";				\
+	PTR	9b,fault;					\
+	.previous
+
+#define EX2(a,b)						\
 9:	a,##b;							\
 	.section __ex_table,"a";				\
 	PTR	9b,bad_stack;					\
+	PTR	9b+4,bad_stack;					\
 	.previous

 	.set	noreorder
 	.set	mips1
-	/* Save floating point context */
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
 LEAF(_save_fp_context)
 	.set	push
 	SET_HARDFLOAT
 	li	v0, 0					# assume success
-	cfc1	t1,fcr31
-	EX(swc1 $f0,(SC_FPREGS+0)(a0))
-	EX(swc1 $f1,(SC_FPREGS+8)(a0))
-	EX(swc1 $f2,(SC_FPREGS+16)(a0))
-	EX(swc1 $f3,(SC_FPREGS+24)(a0))
-	EX(swc1 $f4,(SC_FPREGS+32)(a0))
-	EX(swc1 $f5,(SC_FPREGS+40)(a0))
-	EX(swc1 $f6,(SC_FPREGS+48)(a0))
-	EX(swc1 $f7,(SC_FPREGS+56)(a0))
-	EX(swc1 $f8,(SC_FPREGS+64)(a0))
-	EX(swc1 $f9,(SC_FPREGS+72)(a0))
-	EX(swc1 $f10,(SC_FPREGS+80)(a0))
-	EX(swc1 $f11,(SC_FPREGS+88)(a0))
-	EX(swc1 $f12,(SC_FPREGS+96)(a0))
-	EX(swc1 $f13,(SC_FPREGS+104)(a0))
-	EX(swc1 $f14,(SC_FPREGS+112)(a0))
-	EX(swc1 $f15,(SC_FPREGS+120)(a0))
-	EX(swc1 $f16,(SC_FPREGS+128)(a0))
-	EX(swc1 $f17,(SC_FPREGS+136)(a0))
-	EX(swc1 $f18,(SC_FPREGS+144)(a0))
-	EX(swc1 $f19,(SC_FPREGS+152)(a0))
-	EX(swc1 $f20,(SC_FPREGS+160)(a0))
-	EX(swc1 $f21,(SC_FPREGS+168)(a0))
-	EX(swc1 $f22,(SC_FPREGS+176)(a0))
-	EX(swc1 $f23,(SC_FPREGS+184)(a0))
-	EX(swc1 $f24,(SC_FPREGS+192)(a0))
-	EX(swc1 $f25,(SC_FPREGS+200)(a0))
-	EX(swc1 $f26,(SC_FPREGS+208)(a0))
-	EX(swc1 $f27,(SC_FPREGS+216)(a0))
-	EX(swc1 $f28,(SC_FPREGS+224)(a0))
-	EX(swc1 $f29,(SC_FPREGS+232)(a0))
-	EX(swc1 $f30,(SC_FPREGS+240)(a0))
-	EX(swc1 $f31,(SC_FPREGS+248)(a0))
-	EX(sw	t1,(SC_FPC_CSR)(a0))
-	cfc1	t0,$0				# implementation/version
+	cfc1	t1, fcr31
+	EX2(s.d $f0, 0(a0))
+	EX2(s.d $f2, 16(a0))
+	EX2(s.d $f4, 32(a0))
+	EX2(s.d $f6, 48(a0))
+	EX2(s.d $f8, 64(a0))
+	EX2(s.d $f10, 80(a0))
+	EX2(s.d $f12, 96(a0))
+	EX2(s.d $f14, 112(a0))
+	EX2(s.d $f16, 128(a0))
+	EX2(s.d $f18, 144(a0))
+	EX2(s.d $f20, 160(a0))
+	EX2(s.d $f22, 176(a0))
+	EX2(s.d $f24, 192(a0))
+	EX2(s.d $f26, 208(a0))
+	EX2(s.d $f28, 224(a0))
+	EX2(s.d $f30, 240(a0))
 	jr	ra
+	 EX(sw	t1, (a1))
 	.set	pop
-	.set	nomacro
-	 EX(sw	t0,(SC_FPC_EIR)(a0))
-	.set	macro
 	END(_save_fp_context)
-/*
- * Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
  *
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
  */
 LEAF(_restore_fp_context)
 	.set	push
 	SET_HARDFLOAT
 	li	v0, 0					# assume success
-	EX(lw t0,(SC_FPC_CSR)(a0))
-	EX(lwc1 $f0,(SC_FPREGS+0)(a0))
-	EX(lwc1 $f1,(SC_FPREGS+8)(a0))
-	EX(lwc1 $f2,(SC_FPREGS+16)(a0))
-	EX(lwc1 $f3,(SC_FPREGS+24)(a0))
-	EX(lwc1 $f4,(SC_FPREGS+32)(a0))
-	EX(lwc1 $f5,(SC_FPREGS+40)(a0))
-	EX(lwc1 $f6,(SC_FPREGS+48)(a0))
-	EX(lwc1 $f7,(SC_FPREGS+56)(a0))
-	EX(lwc1 $f8,(SC_FPREGS+64)(a0))
-	EX(lwc1 $f9,(SC_FPREGS+72)(a0))
-	EX(lwc1 $f10,(SC_FPREGS+80)(a0))
-	EX(lwc1 $f11,(SC_FPREGS+88)(a0))
-	EX(lwc1 $f12,(SC_FPREGS+96)(a0))
-	EX(lwc1 $f13,(SC_FPREGS+104)(a0))
-	EX(lwc1 $f14,(SC_FPREGS+112)(a0))
-	EX(lwc1 $f15,(SC_FPREGS+120)(a0))
-	EX(lwc1 $f16,(SC_FPREGS+128)(a0))
-	EX(lwc1 $f17,(SC_FPREGS+136)(a0))
-	EX(lwc1 $f18,(SC_FPREGS+144)(a0))
-	EX(lwc1 $f19,(SC_FPREGS+152)(a0))
-	EX(lwc1 $f20,(SC_FPREGS+160)(a0))
-	EX(lwc1 $f21,(SC_FPREGS+168)(a0))
-	EX(lwc1 $f22,(SC_FPREGS+176)(a0))
-	EX(lwc1 $f23,(SC_FPREGS+184)(a0))
-	EX(lwc1 $f24,(SC_FPREGS+192)(a0))
-	EX(lwc1 $f25,(SC_FPREGS+200)(a0))
-	EX(lwc1 $f26,(SC_FPREGS+208)(a0))
-	EX(lwc1 $f27,(SC_FPREGS+216)(a0))
-	EX(lwc1 $f28,(SC_FPREGS+224)(a0))
-	EX(lwc1 $f29,(SC_FPREGS+232)(a0))
-	EX(lwc1 $f30,(SC_FPREGS+240)(a0))
-	EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+	EX(lw t0, (a1))
+	EX2(l.d $f0, 0(a0))
+	EX2(l.d $f2, 16(a0))
+	EX2(l.d $f4, 32(a0))
+	EX2(l.d $f6, 48(a0))
+	EX2(l.d $f8, 64(a0))
+	EX2(l.d $f10, 80(a0))
+	EX2(l.d $f12, 96(a0))
+	EX2(l.d $f14, 112(a0))
+	EX2(l.d $f16, 128(a0))
+	EX2(l.d $f18, 144(a0))
+	EX2(l.d $f20, 160(a0))
+	EX2(l.d $f22, 176(a0))
+	EX2(l.d $f24, 192(a0))
+	EX2(l.d $f26, 208(a0))
+	EX2(l.d $f28, 224(a0))
+	EX2(l.d $f30, 240(a0))
 	jr	ra
-	 ctc1	t0,fcr31
+	 ctc1	t0, fcr31
 	.set	pop
 	END(_restore_fp_context)
 	.set	reorder

+ 48 - 41
arch/mips/kernel/r6000_fpu.S

@@ -21,7 +21,14 @@
 	.set	push
 	SET_HARDFLOAT

-	/* Save floating point context */
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
 	LEAF(_save_fp_context)
 	mfc0	t0,CP0_STATUS
 	sll	t0,t0,2
@@ -30,59 +37,59 @@
 
 	cfc1	t1,fcr31
 	/* Store the 16 double precision registers */
-	sdc1	$f0,(SC_FPREGS+0)(a0)
-	sdc1	$f2,(SC_FPREGS+16)(a0)
-	sdc1	$f4,(SC_FPREGS+32)(a0)
-	sdc1	$f6,(SC_FPREGS+48)(a0)
-	sdc1	$f8,(SC_FPREGS+64)(a0)
-	sdc1	$f10,(SC_FPREGS+80)(a0)
-	sdc1	$f12,(SC_FPREGS+96)(a0)
-	sdc1	$f14,(SC_FPREGS+112)(a0)
-	sdc1	$f16,(SC_FPREGS+128)(a0)
-	sdc1	$f18,(SC_FPREGS+144)(a0)
-	sdc1	$f20,(SC_FPREGS+160)(a0)
-	sdc1	$f22,(SC_FPREGS+176)(a0)
-	sdc1	$f24,(SC_FPREGS+192)(a0)
-	sdc1	$f26,(SC_FPREGS+208)(a0)
-	sdc1	$f28,(SC_FPREGS+224)(a0)
-	sdc1	$f30,(SC_FPREGS+240)(a0)
+	sdc1	$f0,0(a0)
+	sdc1	$f2,16(a0)
+	sdc1	$f4,32(a0)
+	sdc1	$f6,48(a0)
+	sdc1	$f8,64(a0)
+	sdc1	$f10,80(a0)
+	sdc1	$f12,96(a0)
+	sdc1	$f14,112(a0)
+	sdc1	$f16,128(a0)
+	sdc1	$f18,144(a0)
+	sdc1	$f20,160(a0)
+	sdc1	$f22,176(a0)
+	sdc1	$f24,192(a0)
+	sdc1	$f26,208(a0)
+	sdc1	$f28,224(a0)
+	sdc1	$f30,240(a0)
 	jr	ra
-	 sw	t0,SC_FPC_CSR(a0)
+	 sw	t0,(a1)
 1:	jr	ra
 	 nop
 	END(_save_fp_context)

-/* Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
  *
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
  */
 	LEAF(_restore_fp_context)
 	mfc0	t0,CP0_STATUS
 	sll	t0,t0,2

 	bgez	t0,1f
-	 lw	t0,SC_FPC_CSR(a0)
+	 lw	t0,(a1)
 	/* Restore the 16 double precision registers */
-	ldc1	$f0,(SC_FPREGS+0)(a0)
-	ldc1	$f2,(SC_FPREGS+16)(a0)
-	ldc1	$f4,(SC_FPREGS+32)(a0)
-	ldc1	$f6,(SC_FPREGS+48)(a0)
-	ldc1	$f8,(SC_FPREGS+64)(a0)
-	ldc1	$f10,(SC_FPREGS+80)(a0)
-	ldc1	$f12,(SC_FPREGS+96)(a0)
-	ldc1	$f14,(SC_FPREGS+112)(a0)
-	ldc1	$f16,(SC_FPREGS+128)(a0)
-	ldc1	$f18,(SC_FPREGS+144)(a0)
-	ldc1	$f20,(SC_FPREGS+160)(a0)
-	ldc1	$f22,(SC_FPREGS+176)(a0)
-	ldc1	$f24,(SC_FPREGS+192)(a0)
-	ldc1	$f26,(SC_FPREGS+208)(a0)
-	ldc1	$f28,(SC_FPREGS+224)(a0)
-	ldc1	$f30,(SC_FPREGS+240)(a0)
+	ldc1	$f0,0(a0)
+	ldc1	$f2,16(a0)
+	ldc1	$f4,32(a0)
+	ldc1	$f6,48(a0)
+	ldc1	$f8,64(a0)
+	ldc1	$f10,80(a0)
+	ldc1	$f12,96(a0)
+	ldc1	$f14,112(a0)
+	ldc1	$f16,128(a0)
+	ldc1	$f18,144(a0)
+	ldc1	$f20,160(a0)
+	ldc1	$f22,176(a0)
+	ldc1	$f24,192(a0)
+	ldc1	$f26,208(a0)
+	ldc1	$f28,224(a0)
+	ldc1	$f30,240(a0)
 	jr	ra
 	 ctc1	t0,fcr31
 1:	jr	ra

+ 1 - 1
arch/mips/kernel/relocate.c

@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
 
 #if defined(CONFIG_USE_OF)
 	/* Get any additional entropy passed in device tree */
-	{
+	if (initial_boot_params) {
 		int node, len;
 		u64 *prop;


+ 13 - 0
arch/mips/kernel/setup.c

@@ -368,6 +368,19 @@ static void __init bootmem_init(void)
 		end = PFN_DOWN(boot_mem_map.map[i].addr
 				+ boot_mem_map.map[i].size);

+#ifndef CONFIG_HIGHMEM
+		/*
+		 * Skip highmem here so we get an accurate max_low_pfn if low
+		 * memory stops short of high memory.
+		 * If the region overlaps HIGHMEM_START, end is clipped so
+		 * max_pfn excludes the highmem portion.
+		 */
+		if (start >= PFN_DOWN(HIGHMEM_START))
+			continue;
+		if (end > PFN_DOWN(HIGHMEM_START))
+			end = PFN_DOWN(HIGHMEM_START);
+#endif
+
 		if (end > max_low_pfn)
 			max_low_pfn = end;
 		if (start < min_low_pfn)

+ 73 - 64
arch/mips/kernel/traps.c

@@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 		print_ip_sym(pc);
 		pc = unwind_stack(task, &sp, pc, &ra);
 	} while (pc);
-	printk("\n");
+	pr_cont("\n");
 }

 /*
@@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
 	printk("Stack :");
 	i = 0;
 	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
-		if (i && ((i % (64 / field)) == 0))
-			printk("\n	 ");
+		if (i && ((i % (64 / field)) == 0)) {
+			pr_cont("\n");
+			printk("       ");
+		}
 		if (i > 39) {
-			printk(" ...");
+			pr_cont(" ...");
 			break;
 		}

 		if (__get_user(stackdata, sp++)) {
-			printk(" (Bad stack address)");
+			pr_cont(" (Bad stack address)");
 			break;
 		}

-		printk(" %0*lx", field, stackdata);
+		pr_cont(" %0*lx", field, stackdata);
 		i++;
 	}
-	printk("\n");
+	pr_cont("\n");
 	show_backtrace(task, regs);
 }

@@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc)
 	long i;
 	unsigned short __user *pc16 = NULL;

-	printk("\nCode:");
+	printk("Code:");
 
 	if ((unsigned long)pc & 1)
 		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 	for(i = -3 ; i < 6 ; i++) {
 		unsigned int insn;
 		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
-			printk(" (Bad address in epc)\n");
+			pr_cont(" (Bad address in epc)\n");
 			break;
 		}
-		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
+		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 	}
+	pr_cont("\n");
 }

 static void __show_regs(const struct pt_regs *regs)
@@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
 		if ((i % 4) == 0)
 			printk("$%2d   :", i);
 		if (i == 0)
-			printk(" %0*lx", field, 0UL);
+			pr_cont(" %0*lx", field, 0UL);
 		else if (i == 26 || i == 27)
-			printk(" %*s", field, "");
+			pr_cont(" %*s", field, "");
 		else
-			printk(" %0*lx", field, regs->regs[i]);
+			pr_cont(" %0*lx", field, regs->regs[i]);
 
 		i++;
 		if ((i % 4) == 0)
-			printk("\n");
+			pr_cont("\n");
 	}

 #ifdef CONFIG_CPU_HAS_SMARTMIPS
@@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)
 
 	if (cpu_has_3kex) {
 		if (regs->cp0_status & ST0_KUO)
-			printk("KUo ");
+			pr_cont("KUo ");
 		if (regs->cp0_status & ST0_IEO)
-			printk("IEo ");
+			pr_cont("IEo ");
 		if (regs->cp0_status & ST0_KUP)
-			printk("KUp ");
+			pr_cont("KUp ");
 		if (regs->cp0_status & ST0_IEP)
-			printk("IEp ");
+			pr_cont("IEp ");
 		if (regs->cp0_status & ST0_KUC)
-			printk("KUc ");
+			pr_cont("KUc ");
 		if (regs->cp0_status & ST0_IEC)
-			printk("IEc ");
+			pr_cont("IEc ");
 	} else if (cpu_has_4kex) {
 		if (regs->cp0_status & ST0_KX)
-			printk("KX ");
+			pr_cont("KX ");
 		if (regs->cp0_status & ST0_SX)
-			printk("SX ");
+			pr_cont("SX ");
 		if (regs->cp0_status & ST0_UX)
-			printk("UX ");
+			pr_cont("UX ");
 		switch (regs->cp0_status & ST0_KSU) {
 		case KSU_USER:
-			printk("USER ");
+			pr_cont("USER ");
 			break;
 		case KSU_SUPERVISOR:
-			printk("SUPERVISOR ");
+			pr_cont("SUPERVISOR ");
 			break;
 		case KSU_KERNEL:
-			printk("KERNEL ");
+			pr_cont("KERNEL ");
 			break;
 		default:
-			printk("BAD_MODE ");
+			pr_cont("BAD_MODE ");
 			break;
 		}
 		if (regs->cp0_status & ST0_ERL)
-			printk("ERL ");
+			pr_cont("ERL ");
 		if (regs->cp0_status & ST0_EXL)
-			printk("EXL ");
+			pr_cont("EXL ");
 		if (regs->cp0_status & ST0_IE)
-			printk("IE ");
+			pr_cont("IE ");
 	}
-	printk("\n");
+	pr_cont("\n");
 
 	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
@@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
 	exception_exit(prev_state);
 }

+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits.  This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+		     struct task_struct *tsk)
+{
+	struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+
+	if (fcr31 & FPU_CSR_INV_X)
+		si.si_code = FPE_FLTINV;
+	else if (fcr31 & FPU_CSR_DIV_X)
+		si.si_code = FPE_FLTDIV;
+	else if (fcr31 & FPU_CSR_OVF_X)
+		si.si_code = FPE_FLTOVF;
+	else if (fcr31 & FPU_CSR_UDF_X)
+		si.si_code = FPE_FLTUND;
+	else if (fcr31 & FPU_CSR_INE_X)
+		si.si_code = FPE_FLTRES;
+	else
+		si.si_code = __SI_FAULT;
+	force_sig_info(SIGFPE, &si, tsk);
+}
+
 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 {
 	struct siginfo si = { 0 };
@@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 		return 0;

 	case SIGFPE:
-		si.si_addr = fault_addr;
-		si.si_signo = sig;
-		/*
-		 * Inexact can happen together with Overflow or Underflow.
-		 * Respect the mask to deliver the correct exception.
-		 */
-		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
-			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
-		if (fcr31 & FPU_CSR_INV_X)
-			si.si_code = FPE_FLTINV;
-		else if (fcr31 & FPU_CSR_DIV_X)
-			si.si_code = FPE_FLTDIV;
-		else if (fcr31 & FPU_CSR_OVF_X)
-			si.si_code = FPE_FLTOVF;
-		else if (fcr31 & FPU_CSR_UDF_X)
-			si.si_code = FPE_FLTUND;
-		else if (fcr31 & FPU_CSR_INE_X)
-			si.si_code = FPE_FLTRES;
-		else
-			si.si_code = __SI_FAULT;
-		force_sig_info(sig, &si, current);
+		force_fcr31_sig(fcr31, fault_addr, current);
 		return 1;

 	case SIGBUS:
@@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 	/* Run the emulator */
 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 				       &fault_addr);
-	fcr31 = current->thread.fpu.fcr31;
 
 	/*
-	 * We can't allow the emulated instruction to leave any of
-	 * the cause bits set in $fcr31.
+	 * We can't allow the emulated instruction to leave any
+	 * enabled Cause bits set in $fcr31.
 	 */
 	 */
-	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+	current->thread.fpu.fcr31 &= ~fcr31;
 
 	/* Restore the hardware register state */
 	own_fpu(1);
@@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		goto out;

 	/* Clear FCSR.Cause before enabling interrupts */
-	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 	local_irq_enable();

 	die_if_kernel("FP exception in kernel code", regs);
@@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		/* Run the emulator */
 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 					       &fault_addr);
-		fcr31 = current->thread.fpu.fcr31;
 
 		/*
-		 * We can't allow the emulated instruction to leave any of
-		 * the cause bits set in $fcr31.
+		 * We can't allow the emulated instruction to leave any
+		 * enabled Cause bits set in $fcr31.
 		 */
 		 */
-		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+		current->thread.fpu.fcr31 &= ~fcr31;
 
 		/* Restore the hardware register state */
 		own_fpu(1);	/* Using the FPU again.	 */
@@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
 					       &fault_addr);
-		fcr31 = current->thread.fpu.fcr31;
 
 		/*
 		 * We can't allow the emulated instruction to leave
-		 * any of the cause bits set in $fcr31.
+		 * any enabled Cause bits set in $fcr31.
 		 */
-		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+		current->thread.fpu.fcr31 &= ~fcr31;
 		/* Send a signal if required.  */
 		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
 		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)

+ 19 - 13
arch/mips/kvm/emulate.c

@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;

-	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
 			  kvm_read_c0_guest_epc(cop0));
 		kvm_clear_c0_guest_status(cop0, ST0_EXL);
 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

-	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-		kvm_clear_c0_guest_status(cop0, ST0_ERL);
-		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 	} else {
 		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
 			vcpu->arch.pc);
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
+	unsigned long curr_pc;
 	u32 op, rt;
 	u32 bytes;

 	rt = inst.i_format.rt;
 	op = inst.i_format.opcode;

-	vcpu->arch.pending_load_cause = cause;
+	/*
+	 * Find the resume PC now while we have safe and easy access to the
+	 * prior branch instruction, and save it for
+	 * kvm_mips_complete_mmio_load() to restore later.
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+	vcpu->arch.io_pc = vcpu->arch.pc;
+	vcpu->arch.pc = curr_pc;
+
 	vcpu->arch.io_gpr = rt;

 	switch (op) {
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		goto done;
 	}

-	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
-	if (er == EMULATE_FAIL)
-		return er;
+	/* Restore saved resume PC */
+	vcpu->arch.pc = vcpu->arch.io_pc;
 
 	switch (run->mmio.len) {
 	case 4:
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		break;
 	}

-	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-			  vcpu->mmio_needed);
-
 done:
 	return er;
 }

+ 4 - 1
arch/mips/kvm/mips.c

@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	int cpu = smp_processor_id();
+	int i, cpu = smp_processor_id();
 	unsigned int gasid;

 	/*
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 						vcpu);
 			vcpu->arch.guest_user_asid[cpu] =
 				vcpu->arch.guest_user_mm.context.asid[cpu];
+			for_each_possible_cpu(i)
+				if (i != cpu)
+					vcpu->arch.guest_user_asid[cpu] = 0;
 			vcpu->arch.last_user_gasid = gasid;
 		}
 	}

+ 0 - 4
arch/mips/kvm/mmu.c

@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
 						asid_version_mask(cpu)) {
-		u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
-				KVM_ENTRYHI_ASID;
-
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 		    vcpu->arch.guest_user_mm.context.asid[cpu];
-		vcpu->arch.last_user_gasid = gasid;
 		newasid++;

 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,

+ 22 - 22
arch/mips/lib/dump_tlb.c

@@ -135,42 +135,42 @@ static void dump_tlb(int first, int last)
 		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;

-		printk("va=%0*lx asid=%0*lx",
-		       vwidth, (entryhi & ~0x1fffUL),
-		       asidwidth, entryhi & asidmask);
+		pr_cont("va=%0*lx asid=%0*lx",
+			vwidth, (entryhi & ~0x1fffUL),
+			asidwidth, entryhi & asidmask);
 		if (cpu_has_guestid)
-			printk(" gid=%02lx",
-			       (guestctl1 & MIPS_GCTL1_RID)
+			pr_cont(" gid=%02lx",
+				(guestctl1 & MIPS_GCTL1_RID)
 					>> MIPS_GCTL1_RID_SHIFT);
 		/* RI/XI are in awkward places, so mask them off separately */
 		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
 		if (xpa)
 			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
 		pa = (pa << 6) & PAGE_MASK;
-		printk("\n\t[");
+		pr_cont("\n\t[");
 		if (cpu_has_rixi)
-			printk("ri=%d xi=%d ",
-			       (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
-			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
-		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
-		       pwidth, pa, c0,
-		       (entrylo0 & ENTRYLO_D) ? 1 : 0,
-		       (entrylo0 & ENTRYLO_V) ? 1 : 0,
-		       (entrylo0 & ENTRYLO_G) ? 1 : 0);
+			pr_cont("ri=%d xi=%d ",
+				(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
+				(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
+		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
+			pwidth, pa, c0,
+			(entrylo0 & ENTRYLO_D) ? 1 : 0,
+			(entrylo0 & ENTRYLO_V) ? 1 : 0,
+			(entrylo0 & ENTRYLO_G) ? 1 : 0);
 		/* RI/XI are in awkward places, so mask them off separately */
 		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
 		if (xpa)
 			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
 		pa = (pa << 6) & PAGE_MASK;
 		if (cpu_has_rixi)
-			printk("ri=%d xi=%d ",
-			       (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
-			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
-		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
-		       pwidth, pa, c1,
-		       (entrylo1 & ENTRYLO_D) ? 1 : 0,
-		       (entrylo1 & ENTRYLO_V) ? 1 : 0,
-		       (entrylo1 & ENTRYLO_G) ? 1 : 0);
+			pr_cont("ri=%d xi=%d ",
+				(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
+				(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
+		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
+			pwidth, pa, c1,
+			(entrylo1 & ENTRYLO_D) ? 1 : 0,
+			(entrylo1 & ENTRYLO_V) ? 1 : 0,
+			(entrylo1 & ENTRYLO_G) ? 1 : 0);
 	}
 	printk("\n");


+ 9 - 9
arch/mips/lib/r3k_dump_tlb.c

@@ -53,15 +53,15 @@ static void dump_tlb(int first, int last)
 			 */
 			printk("Index: %2d ", i);

-			printk("va=%08lx asid=%08lx"
-			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
-			       entryhi & PAGE_MASK,
-			       entryhi & asid_mask,
-			       entrylo0 & PAGE_MASK,
-			       (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
-			       (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
-			       (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
-			       (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
+			pr_cont("va=%08lx asid=%08lx"
+				"  [pa=%06lx n=%d d=%d v=%d g=%d]",
+				entryhi & PAGE_MASK,
+				entryhi & asid_mask,
+				entrylo0 & PAGE_MASK,
+				(entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
+				(entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
+				(entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
+				(entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
 		}
 	}
 	printk("\n");

+ 1 - 0
arch/nios2/kernel/time.c

@@ -324,6 +324,7 @@ static int __init nios2_time_init(struct device_node *timer)
 		ret = nios2_clocksource_init(timer);
 		break;
 	default:
+		ret = 0;
 		break;
 	}


+ 2 - 0
arch/openrisc/include/asm/cache.h

@@ -23,6 +23,8 @@
  * they shouldn't be hard-coded!
  */

+#define __ro_after_init __read_mostly
+
 #define L1_CACHE_BYTES 16
 #define L1_CACHE_SHIFT 4


+ 3 - 1
arch/parisc/include/uapi/asm/unistd.h

@@ -368,7 +368,9 @@
 
 #define __IGNORE_select		/* newselect */
 #define __IGNORE_fadvise64	/* fadvise64_64 */
-
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
 
 #define LINUX_GATEWAY_ADDR      0x100


+ 3 - 3
arch/parisc/kernel/drivers.c

@@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev)
 
 	if (dev->num_addrs) {
 		int k;
-		printk(", additional addresses: ");
+		pr_cont(", additional addresses: ");
 		for (k = 0; k < dev->num_addrs; k++)
-			printk("0x%lx ", dev->addr[k]);
+			pr_cont("0x%lx ", dev->addr[k]);
 	}
-	printk("\n");
+	pr_cont("\n");
 }

 /**

+ 34 - 32
arch/parisc/kernel/syscall.S

@@ -100,14 +100,12 @@ set_thread_pointer:
 	.endr
 	.endr
 
 
 /* This address must remain fixed at 0x100 for glibc's syscalls to work */
-	.align 256
+	.align LINUX_GATEWAY_ADDR
 linux_gateway_entry:
 	gate	.+8, %r0			/* become privileged */
 	gate	.+8, %r0			/* become privileged */
 	mtsp	%r0,%sr4			/* get kernel space into sr4 */
 	mtsp	%r0,%sr4			/* get kernel space into sr4 */
 	mtsp	%r0,%sr5			/* get kernel space into sr5 */
 	mtsp	%r0,%sr5			/* get kernel space into sr5 */
 	mtsp	%r0,%sr6			/* get kernel space into sr6 */
 	mtsp	%r0,%sr6			/* get kernel space into sr6 */
-	mfsp    %sr7,%r1                        /* save user sr7 */
-	mtsp    %r1,%sr3                        /* and store it in sr3 */
 
 
 #ifdef CONFIG_64BIT
 #ifdef CONFIG_64BIT
 	/* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
 	depdi	0, 31, 32, %r21
 	depdi	0, 31, 32, %r21
 1:	
 1:	
 #endif
 #endif
+
+	/* We use a rsm/ssm pair to prevent sr3 from being clobbered
+	 * by external interrupts.
+	 */
+	mfsp    %sr7,%r1                        /* save user sr7 */
+	rsm	PSW_SM_I, %r0			/* disable interrupts */
+	mtsp    %r1,%sr3                        /* and store it in sr3 */
+
 	mfctl   %cr30,%r1
 	mfctl   %cr30,%r1
 	xor     %r1,%r30,%r30                   /* ye olde xor trick */
 	xor     %r1,%r30,%r30                   /* ye olde xor trick */
 	xor     %r1,%r30,%r1
 	xor     %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
 	 */
 	 */
 
 
 	mtsp	%r0,%sr7			/* get kernel space into sr7 */
 	mtsp	%r0,%sr7			/* get kernel space into sr7 */
+	ssm	PSW_SM_I, %r0			/* enable interrupts */
 	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
 	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
 	mfctl	%cr30,%r1			/* get task ptr in %r1 */
 	mfctl	%cr30,%r1			/* get task ptr in %r1 */
 	LDREG	TI_TASK(%r1),%r1
 	LDREG	TI_TASK(%r1),%r1
@@ -474,11 +481,6 @@ lws_start:
 	comiclr,>>	__NR_lws_entries, %r20, %r0
 	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 	b,n	lws_exit_nosys
 
 
-	/* WARNING: Trashing sr2 and sr3 */
-	mfsp	%sr7,%r1			/* get userspace into sr3 */
-	mtsp	%r1,%sr3
-	mtsp	%r0,%sr2			/* get kernel space into sr2 */
-
 	/* Load table start */
 	ldil	L%lws_table, %r1
 	ldil	L%lws_table, %r1
 	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
 	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
@@ -627,9 +629,9 @@ cas_action:
 	stw	%r1, 4(%sr2,%r20)
 	stw	%r1, 4(%sr2,%r20)
 #endif
 #endif
 	/* The load and store could fail */
-1:	ldw,ma	0(%sr3,%r26), %r28
+1:	ldw,ma	0(%r26), %r28
 	sub,<>	%r28, %r25, %r0
 	sub,<>	%r28, %r25, %r0
-2:	stw,ma	%r24, 0(%sr3,%r26)
+2:	stw,ma	%r24, 0(%r26)
 	/* Free lock */
 	stw,ma	%r20, 0(%sr2,%r20)
 	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
@@ -706,9 +708,9 @@ lws_compare_and_swap_2:
 	nop
 	nop
 
 
 	/* 8bit load */
-4:	ldb	0(%sr3,%r25), %r25
+4:	ldb	0(%r25), %r25
 	b	cas2_lock_start
 	b	cas2_lock_start
-5:	ldb	0(%sr3,%r24), %r24
+5:	ldb	0(%r24), %r24
 	nop
 	nop
 	nop
 	nop
 	nop
 	nop
@@ -716,9 +718,9 @@ lws_compare_and_swap_2:
 	nop
 	nop
 
 
 	/* 16bit load */
-6:	ldh	0(%sr3,%r25), %r25
+6:	ldh	0(%r25), %r25
 	b	cas2_lock_start
 	b	cas2_lock_start
-7:	ldh	0(%sr3,%r24), %r24
+7:	ldh	0(%r24), %r24
 	nop
 	nop
 	nop
 	nop
 	nop
 	nop
@@ -726,9 +728,9 @@ lws_compare_and_swap_2:
 	nop
 	nop
 
 
 	/* 32bit load */
-8:	ldw	0(%sr3,%r25), %r25
+8:	ldw	0(%r25), %r25
 	b	cas2_lock_start
 	b	cas2_lock_start
-9:	ldw	0(%sr3,%r24), %r24
+9:	ldw	0(%r24), %r24
 	nop
 	nop
 	nop
 	nop
 	nop
 	nop
@@ -737,14 +739,14 @@ lws_compare_and_swap_2:
 
 
 	/* 64bit load */
 #ifdef CONFIG_64BIT
 #ifdef CONFIG_64BIT
-10:	ldd	0(%sr3,%r25), %r25
-11:	ldd	0(%sr3,%r24), %r24
+10:	ldd	0(%r25), %r25
+11:	ldd	0(%r24), %r24
 #else
 #else
 	/* Load new value into r22/r23 - high/low */
 	/* Load new value into r22/r23 - high/low */
-10:	ldw	0(%sr3,%r25), %r22
-11:	ldw	4(%sr3,%r25), %r23
+10:	ldw	0(%r25), %r22
+11:	ldw	4(%r25), %r23
 	/* Load new value into fr4 for atomic store later */
 	/* Load new value into fr4 for atomic store later */
-12:	flddx	0(%sr3,%r24), %fr4
+12:	flddx	0(%r24), %fr4
 #endif
 #endif
 
 
 cas2_lock_start:
 cas2_lock_start:
@@ -794,30 +796,30 @@ cas2_action:
 	ldo	1(%r0),%r28
 	ldo	1(%r0),%r28
 
 
 	/* 8bit CAS */
 	/* 8bit CAS */
-13:	ldb,ma	0(%sr3,%r26), %r29
+13:	ldb,ma	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
 	b,n	cas2_end
-14:	stb,ma	%r24, 0(%sr3,%r26)
+14:	stb,ma	%r24, 0(%r26)
 	b	cas2_end
 	b	cas2_end
 	copy	%r0, %r28
 	copy	%r0, %r28
 	nop
 	nop
 	nop
 	nop
 
 
 	/* 16bit CAS */
 	/* 16bit CAS */
-15:	ldh,ma	0(%sr3,%r26), %r29
+15:	ldh,ma	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
 	b,n	cas2_end
-16:	sth,ma	%r24, 0(%sr3,%r26)
+16:	sth,ma	%r24, 0(%r26)
 	b	cas2_end
 	b	cas2_end
 	copy	%r0, %r28
 	copy	%r0, %r28
 	nop
 	nop
 	nop
 	nop
 
 
 	/* 32bit CAS */
 	/* 32bit CAS */
-17:	ldw,ma	0(%sr3,%r26), %r29
+17:	ldw,ma	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
 	b,n	cas2_end
-18:	stw,ma	%r24, 0(%sr3,%r26)
+18:	stw,ma	%r24, 0(%r26)
 	b	cas2_end
 	b	cas2_end
 	copy	%r0, %r28
 	copy	%r0, %r28
 	nop
 	nop
@@ -825,22 +827,22 @@ cas2_action:
 
 
 	/* 64bit CAS */
 	/* 64bit CAS */
 #ifdef CONFIG_64BIT
 #ifdef CONFIG_64BIT
-19:	ldd,ma	0(%sr3,%r26), %r29
+19:	ldd,ma	0(%r26), %r29
 	sub,*=	%r29, %r25, %r0
 	sub,*=	%r29, %r25, %r0
 	b,n	cas2_end
 	b,n	cas2_end
-20:	std,ma	%r24, 0(%sr3,%r26)
+20:	std,ma	%r24, 0(%r26)
 	copy	%r0, %r28
 	copy	%r0, %r28
 #else
 #else
 	/* Compare first word */
 	/* Compare first word */
-19:	ldw,ma	0(%sr3,%r26), %r29
+19:	ldw,ma	0(%r26), %r29
 	sub,=	%r29, %r22, %r0
 	sub,=	%r29, %r22, %r0
 	b,n	cas2_end
 	b,n	cas2_end
 	/* Compare second word */
 	/* Compare second word */
-20:	ldw,ma	4(%sr3,%r26), %r29
+20:	ldw,ma	4(%r26), %r29
 	sub,=	%r29, %r23, %r0
 	sub,=	%r29, %r23, %r0
 	b,n	cas2_end
 	b,n	cas2_end
 	/* Perform the store */
 	/* Perform the store */
-21:	fstdx	%fr4, 0(%sr3,%r26)
+21:	fstdx	%fr4, 0(%r26)
 	copy	%r0, %r28
 	copy	%r0, %r28
 #endif
 #endif
 
 

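Note on the LWS hunks above: the parisc light-weight syscall path implements compare-and-swap for userspace under a hashed kernel lock, because the CPU has no atomic CAS instruction. A rough C-level sketch of what the cas2 sequence does follows; lws_lock_for() is a hypothetical helper and the return convention merely mirrors the r28 usage visible in the assembly (1 preset, 0 on success), not the exact user-visible ABI.

#include <linux/spinlock.h>
#include <linux/uaccess.h>

/* Conceptual sketch only: 0 on success, 1 on compare mismatch,
 * negative on a faulting user access. The real code is the
 * hand-written assembly in arch/parisc/kernel/syscall.S.
 */
static int lws_cas_sketch(u32 __user *uaddr, u32 old_val, u32 new_val)
{
	arch_spinlock_t *lock = lws_lock_for(uaddr);	/* hashed lock array (assumed helper) */
	u32 cur;
	int ret;

	arch_spin_lock(lock);				/* taken in cas2_lock_start */
	ret = get_user(cur, uaddr);			/* the ldw,ma above; may fault */
	if (!ret) {
		if (cur != old_val)
			ret = 1;			/* "ldo 1(%r0),%r28" default result */
		else
			ret = put_user(new_val, uaddr);	/* the stw,ma above; may fault */
	}
	arch_spin_unlock(lock);				/* "Free lock" */
	return ret;
}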
+ 12 - 3
arch/powerpc/include/asm/exception-64s.h

@@ -91,7 +91,7 @@
  */
 #define LOAD_HANDLER(reg, label)					\
 	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
-	ori	reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
 
 #define __LOAD_HANDLER(reg, label)					\
 	ld	reg,PACAKBASE(r13);					\
@@ -158,14 +158,17 @@ BEGIN_FTR_SECTION_NESTED(943)						\
 	std	ra,offset(r13);						\
 END_FTR_SECTION_NESTED(ftr,ftr,943)
 
-#define EXCEPTION_PROLOG_0(area)					\
-	GET_PACA(r13);							\
+#define EXCEPTION_PROLOG_0_PACA(area)					\
 	std	r9,area+EX_R9(r13);	/* save r9 */			\
 	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);			\
 	HMT_MEDIUM;							\
 	std	r10,area+EX_R10(r13);	/* save r10 - r12 */		\
 	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
 
+#define EXCEPTION_PROLOG_0(area)					\
+	GET_PACA(r13);							\
+	EXCEPTION_PROLOG_0_PACA(area)
+
 #define __EXCEPTION_PROLOG_1(area, extra, vec)				\
 	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);		\
 	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
@@ -196,6 +199,12 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	EXCEPTION_PROLOG_1(area, extra, vec);				\
 	EXCEPTION_PROLOG_PSERIES_1(label, h);
 
+/* Have the PACA in r13 already */
+#define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec)	\
+	EXCEPTION_PROLOG_0_PACA(area);					\
+	EXCEPTION_PROLOG_1(area, extra, vec);				\
+	EXCEPTION_PROLOG_PSERIES_1(label, h);
+
 #define __KVMTEST(h, n)							\
 	lbz	r10,HSTATE_IN_GUEST(r13);				\
 	cmpwi	r10,0;							\

+ 1 - 0
arch/powerpc/include/asm/ppc-opcode.h

@@ -460,5 +460,6 @@
 
 #define PPC_SLBIA(IH)	stringify_in_c(.long PPC_INST_SLBIA | \
 				       ((IH & 0x7) << 21))
+#define PPC_INVALIDATE_ERAT	PPC_SLBIA(7)
 
 #endif /* _ASM_POWERPC_PPC_OPCODE_H */

+ 8 - 3
arch/powerpc/kernel/exceptions-64s.S

@@ -116,7 +116,9 @@ EXC_VIRT_NONE(0x4000, 0x4100)
 
 EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
 	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+	GET_PACA(r13)
+	clrrdi	r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */
+	EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD,
 				 IDLETEST, 0x100)
 
 EXC_REAL_END(system_reset, 0x100, 0x200)
@@ -124,6 +126,9 @@ EXC_VIRT_NONE(0x4100, 0x4200)
 
 #ifdef CONFIG_PPC_P7_NAP
 EXC_COMMON_BEGIN(system_reset_idle_common)
+BEGIN_FTR_SECTION
+	GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	bl	pnv_restore_hyp_resource
 
 	li	r0,PNV_THREAD_RUNNING
@@ -169,7 +174,7 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
 	SET_SCRATCH0(r13)		/* save r13 */
 	/*
 	 * Running native on arch 2.06 or later, we may wakeup from winkle
-	 * inside machine check. If yes, then last bit of HSPGR0 would be set
+	 * inside machine check. If yes, then last bit of HSPRG0 would be set
 	 * to 1. Hence clear it unconditionally.
 	 */
 	GET_PACA(r13)
@@ -388,7 +393,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
 	/*
 	 * Go back to winkle. Please note that this thread was woken up in
 	 * machine check from winkle and have not restored the per-subcore
-	 * state. Hence before going back to winkle, set last bit of HSPGR0
+	 * state. Hence before going back to winkle, set last bit of HSPRG0
 	 * to 1. This will make sure that if this thread gets woken up
 	 * again at reset vector 0x100 then it will get chance to restore
 	 * the subcore state.

+ 21 - 21
arch/powerpc/kernel/process.c

@@ -1215,7 +1215,7 @@ static void show_instructions(struct pt_regs *regs)
 		int instr;
 
 		if (!(i % 8))
-			printk("\n");
+			pr_cont("\n");
 
 #if !defined(CONFIG_BOOKE)
 		/* If executing with the IMMU off, adjust pc rather
@@ -1227,18 +1227,18 @@ static void show_instructions(struct pt_regs *regs)
 
 		if (!__kernel_text_address(pc) ||
 		     probe_kernel_address((unsigned int __user *)pc, instr)) {
-			printk(KERN_CONT "XXXXXXXX ");
+			pr_cont("XXXXXXXX ");
 		} else {
 			if (regs->nip == pc)
-				printk(KERN_CONT "<%08x> ", instr);
+				pr_cont("<%08x> ", instr);
 			else
-				printk(KERN_CONT "%08x ", instr);
+				pr_cont("%08x ", instr);
 		}
 
 		pc += sizeof(int);
 	}
 
-	printk("\n");
+	pr_cont("\n");
 }
 
 struct regbit {
@@ -1282,7 +1282,7 @@ static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
 
 	for (; bits->bit; ++bits)
 		if (val & bits->bit) {
-			printk("%s%s", s, bits->name);
+			pr_cont("%s%s", s, bits->name);
 			s = sep;
 		}
 }
@@ -1305,9 +1305,9 @@ static void print_tm_bits(unsigned long val)
 *   T: Transactional	(bit 34)
 */
 	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
-		printk(",TM[");
+		pr_cont(",TM[");
 		print_bits(val, msr_tm_bits, "");
-		printk("]");
+		pr_cont("]");
 	}
 }
 #else
@@ -1316,10 +1316,10 @@ static void print_tm_bits(unsigned long val) {}
 
 static void print_msr_bits(unsigned long val)
 {
-	printk("<");
+	pr_cont("<");
 	print_bits(val, msr_bits, ",");
 	print_tm_bits(val);
-	printk(">");
+	pr_cont(">");
 }
 
 #ifdef CONFIG_PPC64
@@ -1347,29 +1347,29 @@ void show_regs(struct pt_regs * regs)
 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
 	trap = TRAP(regs);
 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
-		printk("CFAR: "REG" ", regs->orig_gpr3);
+		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
 	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
+		pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
 #else
-		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
+		pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
 #endif
 #ifdef CONFIG_PPC64
-	printk("SOFTE: %ld ", regs->softe);
+	pr_cont("SOFTE: %ld ", regs->softe);
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (MSR_TM_ACTIVE(regs->msr))
-		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
+		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
 #endif
 
 	for (i = 0;  i < 32;  i++) {
 		if ((i % REGS_PER_LINE) == 0)
-			printk("\nGPR%02d: ", i);
-		printk(REG " ", regs->gpr[i]);
+			pr_cont("\nGPR%02d: ", i);
+		pr_cont(REG " ", regs->gpr[i]);
 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
 			break;
 	}
-	printk("\n");
+	pr_cont("\n");
 #ifdef CONFIG_KALLSYMS
 	/*
 	 * Lookup NIP late so we have the best change of getting the
@@ -1900,14 +1900,14 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 			if ((ip == rth) && curr_frame >= 0) {
-				printk(" (%pS)",
+				pr_cont(" (%pS)",
 				       (void *)current->ret_stack[curr_frame].ret);
 				curr_frame--;
 			}
 #endif
 			if (firstframe)
-				printk(" (unreliable)");
-			printk("\n");
+				pr_cont(" (unreliable)");
+			pr_cont("\n");
 		}
 		firstframe = 0;
 

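Context for the printk()/pr_cont() conversions in process.c above: since the printk rework, a bare printk() starts a new message, so fragments that are meant to continue the current console line need KERN_CONT, for which pr_cont() is the shorthand. A minimal sketch of the pattern (illustrative function, not part of the patch):

#include <linux/kernel.h>
#include <linux/printk.h>

/* Emit one value per call while keeping everything on a single console line. */
static void show_three_values(unsigned long a, unsigned long b, unsigned long c)
{
	printk(KERN_INFO "vals:");	/* start the line with an explicit log level */
	pr_cont(" %lx", a);		/* pr_cont(fmt, ...) == printk(KERN_CONT fmt, ...) */
	pr_cont(" %lx", b);
	pr_cont(" %lx", c);
	pr_cont("\n");			/* terminate the line */
}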
+ 14 - 6
arch/powerpc/kernel/setup_64.c

@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
 		if (firmware_has_feature(FW_FEATURE_OPAL))
 			opal_configure_cores();
 
-		/* Enable AIL if supported, and we are in hypervisor mode */
-		if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
-		    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
-			unsigned long lpcr = mfspr(SPRN_LPCR);
-			mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
-		}
+		/* AIL on native is done in cpu_ready_for_interrupts() */
 	}
 }
 
 static void cpu_ready_for_interrupts(void)
 {
+	/*
+	 * Enable AIL if supported, and we are in hypervisor mode. This
+	 * is called once for every processor.
+	 *
+	 * If we are not in hypervisor mode the job is done once for
+	 * the whole partition in configure_exceptions().
+	 */
+	if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+	    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		unsigned long lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+	}
+
 	/* Set IR and DR in PACA MSR */
 	get_paca()->kernel_msr = MSR_KERNEL;
 }

+ 4 - 0
arch/powerpc/mm/hash_utils_64.c

@@ -1029,6 +1029,10 @@ void hash__early_init_mmu_secondary(void)
 {
 	/* Initialize hash table for that CPU */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			update_hid_for_hash();
+
 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
 			mtspr(SPRN_SDR1, _SDR1);
 		else

+ 4 - 0
arch/powerpc/mm/pgtable-radix.c

@@ -388,6 +388,10 @@ void radix__early_init_mmu_secondary(void)
 	 * update partition table control register and UPRT
 	 */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			update_hid_for_radix();
+
 		lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 

+ 4 - 0
arch/powerpc/mm/tlb-radix.c

@@ -50,6 +50,8 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 		__tlbiel_pid(pid, set, ric);
 	}
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 	return;
 }
 
@@ -83,6 +85,8 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,

+ 3 - 3
arch/s390/hypfs/hypfs_diag.c

@@ -363,11 +363,11 @@ out:
 static int diag224_get_name_table(void)
 {
 	/* memory must be below 2GB */
-	diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+	diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
 	if (!diag224_cpu_names)
 		return -ENOMEM;
 	if (diag224(diag224_cpu_names)) {
-		kfree(diag224_cpu_names);
+		free_page((unsigned long) diag224_cpu_names);
 		return -EOPNOTSUPP;
 	}
 	EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
 
 static void diag224_delete_name_table(void)
 {
-	kfree(diag224_cpu_names);
+	free_page((unsigned long) diag224_cpu_names);
 }
 
 static int diag224_idx2name(int index, char *name)

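On the hypfs_diag.c change above: the diag 224 buffer must sit below 2GB (hence GFP_DMA on s390), and once it comes from the page allocator it has to be released with free_page() rather than kfree(). A minimal sketch of the corrected alloc/free pairing (illustrative names only):

#include <linux/gfp.h>

/* Allocate a single page below 2GB (GFP_DMA on s390) and release it again.
 * Page-allocator memory is freed with free_page(), never kfree().
 */
static void *alloc_low_page(void)
{
	return (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
}

static void free_low_page(void *p)
{
	free_page((unsigned long) p);
}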
Some files are not shown in this diff because too many files have changed.