
Merge tag 'v3.10-rc6' into x86/cleanups

Linux 3.10-rc6

We need a change that is in the mainline tree for further work.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
H. Peter Anvin, 12 years ago
parent
commit
e6bca5a6a8
100 changed files with 1063 additions and 531 deletions
  1. Documentation/bcache.txt (+9 -3)
  2. Documentation/devices.txt (+2 -6)
  3. Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt (+1 -1)
  4. Documentation/dmatest.txt (+3 -3)
  5. Documentation/filesystems/xfs.txt (+3 -0)
  6. Documentation/kernel-parameters.txt (+0 -3)
  7. Documentation/m68k/kernel-options.txt (+0 -2)
  8. Documentation/powerpc/transactional_memory.txt (+25 -2)
  9. MAINTAINERS (+25 -7)
  10. Makefile (+1 -1)
  11. arch/arm/boot/compressed/Makefile (+1 -1)
  12. arch/arm/boot/compressed/debug.S (+28 -0)
  13. arch/arm/boot/compressed/head-sa1100.S (+1 -0)
  14. arch/arm/boot/compressed/head-shark.S (+1 -0)
  15. arch/arm/boot/compressed/head.S (+3 -2)
  16. arch/arm/boot/dts/am33xx.dtsi (+2 -2)
  17. arch/arm/boot/dts/armada-xp-gp.dts (+3 -2)
  18. arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts (+3 -2)
  19. arch/arm/boot/dts/bcm2835.dtsi (+1 -0)
  20. arch/arm/boot/dts/imx25.dtsi (+6 -6)
  21. arch/arm/boot/dts/imx27.dtsi (+3 -3)
  22. arch/arm/boot/dts/imx51.dtsi (+1 -1)
  23. arch/arm/boot/dts/imx53.dtsi (+1 -1)
  24. arch/arm/boot/dts/omap4-panda-common.dtsi (+20 -0)
  25. arch/arm/boot/dts/omap4-sdp.dts (+20 -0)
  26. arch/arm/boot/dts/omap5.dtsi (+3 -0)
  27. arch/arm/include/asm/percpu.h (+9 -2)
  28. arch/arm/include/asm/tlb.h (+4 -23)
  29. arch/arm/kernel/topology.c (+2 -0)
  30. arch/arm/kvm/arm.c (+13 -2)
  31. arch/arm/kvm/mmu.c (+26 -15)
  32. arch/arm/mach-exynos/common.c (+2 -0)
  33. arch/arm/mach-imx/clk-imx6q.c (+2 -2)
  34. arch/arm/mach-kirkwood/board-ts219.c (+0 -10)
  35. arch/arm/mach-kirkwood/mpp.c (+3 -2)
  36. arch/arm/mach-mvebu/coherency_ll.S (+11 -5)
  37. arch/arm/mach-omap2/clock36xx.c (+9 -9)
  38. arch/arm/mach-omap2/omap_hwmod_33xx_data.c (+8 -1)
  39. arch/arm/mach-omap2/pm34xx.c (+4 -2)
  40. arch/arm/mach-prima2/pm.c (+4 -2)
  41. arch/arm/mach-prima2/rstc.c (+4 -2)
  42. arch/arm/mach-shmobile/setup-sh73a0.c (+1 -1)
  43. arch/arm/mach-ux500/board-mop500-regulators.c (+3 -0)
  44. arch/arm/mach-ux500/cpuidle.c (+4 -0)
  45. arch/arm/plat-samsung/include/plat/uncompress.h (+9 -1)
  46. arch/arm/plat-samsung/pm.c (+13 -5)
  47. arch/arm64/kernel/arm64ksyms.c (+1 -0)
  48. arch/arm64/kernel/entry.S (+10 -0)
  49. arch/arm64/kernel/traps.c (+12 -5)
  50. arch/arm64/mm/fault.c (+2 -1)
  51. arch/ia64/include/asm/tlb.h (+8 -33)
  52. arch/m68k/include/asm/gpio.h (+2 -1)
  53. arch/m68k/kernel/head.S (+19 -10)
  54. arch/microblaze/include/asm/cacheflush.h (+18 -16)
  55. arch/microblaze/include/asm/uaccess.h (+2 -2)
  56. arch/mips/cavium-octeon/setup.c (+9 -6)
  57. arch/mips/include/asm/kvm_host.h (+0 -4)
  58. arch/mips/include/asm/mmu_context.h (+1 -1)
  59. arch/mips/include/asm/ptrace.h (+32 -0)
  60. arch/mips/include/uapi/asm/kvm.h (+107 -27)
  61. arch/mips/include/uapi/asm/ptrace.h (+2 -15)
  62. arch/mips/kernel/binfmt_elfn32.c (+11 -0)
  63. arch/mips/kernel/binfmt_elfo32.c (+11 -0)
  64. arch/mips/kernel/ftrace.c (+4 -0)
  65. arch/mips/kernel/idle.c (+7 -6)
  66. arch/mips/kernel/rtlx.c (+1 -0)
  67. arch/mips/kernel/traps.c (+15 -13)
  68. arch/mips/kvm/kvm_mips.c (+284 -21)
  69. arch/mips/kvm/kvm_trap_emul.c (+0 -50)
  70. arch/mips/mm/tlbex.c (+0 -4)
  71. arch/mips/ralink/of.c (+1 -1)
  72. arch/parisc/Makefile (+1 -1)
  73. arch/parisc/include/asm/mmzone.h (+1 -4)
  74. arch/parisc/kernel/drivers.c (+1 -1)
  75. arch/parisc/kernel/setup.c (+2 -1)
  76. arch/powerpc/include/asm/cputable.h (+10 -7)
  77. arch/powerpc/include/asm/exception-64s.h (+1 -1)
  78. arch/powerpc/include/asm/hvcall.h (+1 -0)
  79. arch/powerpc/include/asm/kvm_asm.h (+10 -6)
  80. arch/powerpc/include/asm/ppc_asm.h (+11 -0)
  81. arch/powerpc/include/asm/processor.h (+4 -9)
  82. arch/powerpc/include/asm/reg.h (+0 -11)
  83. arch/powerpc/include/asm/signal.h (+3 -0)
  84. arch/powerpc/include/asm/tm.h (+2 -0)
  85. arch/powerpc/include/uapi/asm/Kbuild (+1 -0)
  86. arch/powerpc/include/uapi/asm/tm.h (+18 -0)
  87. arch/powerpc/kernel/cputable.c (+3 -3)
  88. arch/powerpc/kernel/entry_32.S (+1 -1)
  89. arch/powerpc/kernel/entry_64.S (+7 -28)
  90. arch/powerpc/kernel/exceptions-64s.S (+27 -65)
  91. arch/powerpc/kernel/irq.c (+1 -1)
  92. arch/powerpc/kernel/pci-common.c (+4 -14)
  93. arch/powerpc/kernel/process.c (+4 -3)
  94. arch/powerpc/kernel/signal.c (+38 -2)
  95. arch/powerpc/kernel/signal.h (+1 -1)
  96. arch/powerpc/kernel/signal_32.c (+2 -8)
  97. arch/powerpc/kernel/signal_64.c (+7 -16)
  98. arch/powerpc/kernel/traps.c (+39 -0)
  99. arch/powerpc/kvm/44x_tlb.c (+5 -0)
  100. arch/powerpc/kvm/book3s_hv.c (+2 -0)

+ 9 - 3
Documentation/bcache.txt

@@ -319,7 +319,10 @@ cache<0..n>
  Symlink to each of the cache devices comprising this cache set.

 cache_available_percent
-  Percentage of cache device free.
+  Percentage of cache device which doesn't contain dirty data, and could
+  potentially be used for writeback.  This doesn't mean this space isn't used
+  for clean cached data; the unused statistic (in priority_stats) is typically
+  much lower.

 clear_stats
   Clears the statistics associated with this cache
@@ -423,8 +426,11 @@ nbuckets
  Total buckets in this cache

 priority_stats
-  Statistics about how recently data in the cache has been accessed.  This can
-  reveal your working set size.
+  Statistics about how recently data in the cache has been accessed.
+  This can reveal your working set size.  Unused is the percentage of
+  the cache that doesn't contain any data.  Metadata is bcache's
+  metadata overhead.  Average is the average priority of cache buckets.
+  Next is a list of quantiles with the priority threshold of each.

 written
   Sum of all data that has been written to the cache; comparison with

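As a quick illustration of the statistics documented above, here is a minimal C sketch that dumps cache_available_percent and priority_stats through sysfs. It is only a sketch: the /sys/fs/bcache/<set-uuid>/ layout is assumed from the documentation, and the UUID path below is a placeholder, not something taken from this patch.

/* Sketch: print the bcache cache-set statistics discussed above.
 * Assumes a registered cache set; the UUID path below is a placeholder.
 */
#include <stdio.h>

static void dump_file(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	printf("%s:\n", path);
	while (fgets(line, sizeof(line), f))
		printf("  %s", line);
	fclose(f);
}

int main(void)
{
	/* Substitute the UUID of your own cache set here. */
	const char *set = "/sys/fs/bcache/<set-uuid>";
	char path[256];

	snprintf(path, sizeof(path), "%s/cache_available_percent", set);
	dump_file(path);
	snprintf(path, sizeof(path), "%s/cache0/priority_stats", set);
	dump_file(path);
	return 0;
}
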
+ 2 - 6
Documentation/devices.txt

@@ -498,12 +498,8 @@ Your cooperation is appreciated.

 		Each device type has 5 bits (32 minors).

- 13 block	8-bit MFM/RLL/IDE controller
-		  0 = /dev/xda		First XT disk whole disk
-		 64 = /dev/xdb		Second XT disk whole disk
-
-		Partitions are handled in the same way as IDE disks
-		(see major number 3).
+ 13 block	Previously used for the XT disk (/dev/xdN)
+		Deleted in kernel v3.9.

  14 char	Open Sound System (OSS)
 		  0 = /dev/mixer	Mixer control

+ 1 - 1
Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt

@@ -1,7 +1,7 @@
 Atmel AT91RM9200 Real Time Clock

 Required properties:
-- compatible: should be: "atmel,at91rm9200-rtc"
+- compatible: should be: "atmel,at91rm9200-rtc" or "atmel,at91sam9x5-rtc"
 - reg: physical base address of the controller and length of memory mapped
   region.
 - interrupts: rtc alarm/event interrupt

+ 3 - 3
Documentation/dmatest.txt

@@ -34,7 +34,7 @@ command:
 After a while you will start to get messages about current status or error like
 in the original code.

-Note that running a new test will stop any in progress test.
+Note that running a new test will not stop any in progress test.

 The following command should return actual state of the test.
 	% cat /sys/kernel/debug/dmatest/run
@@ -52,8 +52,8 @@ To wait for test done the user may perform a busy loop that checks the state.

 The module parameters that is supplied to the kernel command line will be used
 for the first performed test. After user gets a control, the test could be
-interrupted or re-run with same or different parameters. For the details see
-the above section "Part 2 - When dmatest is built as a module..."
+re-run with the same or different parameters. For the details see the above
+section "Part 2 - When dmatest is built as a module..."

 In both cases the module parameters are used as initial values for the test case.
 You always could check them at run-time by running

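The busy loop mentioned above is small enough to show in full. The following C sketch polls the debugfs "run" file until the test stops; it assumes debugfs is mounted at /sys/kernel/debug, that dmatest is loaded, and that the file reads back "Y" while a test is running (an assumption based on the text above, not on this patch).

/* Sketch: wait for a dmatest run to finish by polling its debugfs state. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dmatest/run";
	char state[8] = "";

	for (;;) {
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return 1;
		}
		if (!fgets(state, sizeof(state), f))
			state[0] = '\0';
		fclose(f);

		if (state[0] != 'Y')	/* assumed: "Y" means still running */
			break;
		sleep(1);
	}
	printf("dmatest state: %s\n", state);
	return 0;
}
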
+ 3 - 0
Documentation/filesystems/xfs.txt

@@ -33,6 +33,9 @@ When mounting an XFS filesystem, the following options are accepted.
 	removing extended attributes) the on-disk superblock feature
 	bit field will be updated to reflect this format being in use.

+	CRC enabled filesystems always use the attr2 format, and so
+	will reject the noattr2 mount option if it is set.
+
   barrier
 	Enables the use of block layer write barriers for writes into
 	the journal and unwritten extent conversion.  This allows for

+ 0 - 3
Documentation/kernel-parameters.txt

@@ -3351,9 +3351,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			plus one apbt timer for broadcast timer.
 			x86_mrst_timer=apbt_only | lapic_and_apbt

-	xd=		[HW,XT] Original XT pre-IDE (RLL encoded) disks.
-	xd_geo=		See header of drivers/block/xd.c.
-
 	xen_emul_unplug=		[HW,X86,XEN]
 			Unplug Xen emulated devices
 			Format: [unplug0,][unplug1]

+ 0 - 2
Documentation/m68k/kernel-options.txt

@@ -80,8 +80,6 @@ Valid names are:
   /dev/sdd: -> 0x0830 (forth SCSI disk)
   /dev/sde: -> 0x0840 (fifth SCSI disk)
   /dev/fd : -> 0x0200 (floppy disk)
-  /dev/xda: -> 0x0c00 (first XT disk, unused in Linux/m68k)
-  /dev/xdb: -> 0x0c40 (second XT disk, unused in Linux/m68k)

   The name must be followed by a decimal number, that stands for the
 partition number. Internally, the value of the number is just

+ 25 - 2
Documentation/powerpc/transactional_memory.txt

@@ -147,6 +147,25 @@ Example signal handler:
       fix_the_problem(ucp->dar);
     }

+When in an active transaction that takes a signal, we need to be careful with
+the stack.  It's possible that the stack has moved back up after the tbegin.
+The obvious case here is when the tbegin is called inside a function that
+returns before a tend.  In this case, the stack is part of the checkpointed
+transactional memory state.  If we write over this non transactionally or in
+suspend, we are in trouble because if we get a tm abort, the program counter and
+stack pointer will be back at the tbegin but our in memory stack won't be valid
+anymore.
+
+To avoid this, when taking a signal in an active transaction, we need to use
+the stack pointer from the checkpointed state, rather than the speculated
+state.  This ensures that the signal context (written tm suspended) will be
+written below the stack required for the rollback.  The transaction is aborted
+because of the treclaim, so any memory written between the tbegin and the
+signal will be rolled back anyway.
+
+For signals taken in non-TM or suspended mode, we use the
+normal/non-checkpointed stack pointer.
+

 Failure cause codes used by kernel
 ==================================
@@ -155,14 +174,18 @@ These are defined in <asm/reg.h>, and distinguish different reasons why the
 kernel aborted a transaction:

  TM_CAUSE_RESCHED       Thread was rescheduled.
+ TM_CAUSE_TLBI          Software TLB invalidate.
  TM_CAUSE_FAC_UNAV      FP/VEC/VSX unavailable trap.
  TM_CAUSE_SYSCALL       Currently unused; future syscalls that must abort
                         transactions for consistency will use this.
  TM_CAUSE_SIGNAL        Signal delivered.
  TM_CAUSE_MISC          Currently unused.
+ TM_CAUSE_ALIGNMENT     Alignment fault.
+ TM_CAUSE_EMULATE       Emulation that touched memory.

-These can be checked by the user program's abort handler as TEXASR[0:7].
-
+These can be checked by the user program's abort handler as TEXASR[0:7].  If
+bit 7 is set, it indicates that the error is considered persistent.  For example
+a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not.

 GDB
 ===

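To make the TEXASR[0:7] convention above concrete, here is a hedged user-space sketch of an abort path that inspects the failure-cause byte. It assumes a GCC toolchain with the PowerPC HTM builtins (compile with -mhtm); the persistent-bit test simply checks the low bit of the cause byte, as described above.

/* Sketch: inspect the TM failure cause after an aborted transaction.
 * Assumes GCC's PowerPC HTM builtins (<htmintrin.h>, -mhtm).
 */
#include <stdio.h>
#include <htmintrin.h>

static void report_abort_cause(void)
{
	/* TEXASRU holds the upper 32 bits of TEXASR, so TEXASR[0:7]
	 * is the top byte of this value. */
	unsigned long texasru = __builtin_get_texasru();
	unsigned int cause = (texasru >> 24) & 0xff;

	/* Bit 7 of the field (the low bit of the byte) flags a
	 * persistent failure; retrying is then unlikely to succeed. */
	if (cause & 1)
		printf("persistent abort, cause byte 0x%02x\n", cause);
	else
		printf("transient abort, cause byte 0x%02x\n", cause);
}

int main(void)
{
	if (__builtin_tbegin(0)) {
		/* Transactional path. */
		__builtin_tend(0);
	} else {
		report_abort_cause();
	}
	return 0;
}
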
+ 25 - 7
MAINTAINERS

@@ -2890,8 +2890,8 @@ F:	drivers/media/dvb-frontends/ec100*

 ECRYPT FILE SYSTEM
 M:	Tyler Hicks <tyhicks@canonical.com>
-M:	Dustin Kirkland <dustin.kirkland@gazzang.com>
 L:	ecryptfs@vger.kernel.org
+W:	http://ecryptfs.org
 W:	https://launchpad.net/ecryptfs
 S:	Supported
 F:	Documentation/filesystems/ecryptfs.txt
@@ -3322,11 +3322,12 @@ F:	drivers/net/wan/dlci.c
 F:	drivers/net/wan/sdla.c

 FRAMEBUFFER LAYER
-M:	Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+M:	Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
+M:	Tomi Valkeinen <tomi.valkeinen@ti.com>
 L:	linux-fbdev@vger.kernel.org
 W:	http://linux-fbdev.sourceforge.net/
 Q:	http://patchwork.kernel.org/project/linux-fbdev/list/
-T:	git git://github.com/schandinat/linux-2.6.git fbdev-next
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/plagnioj/linux-fbdev.git
 S:	Maintained
 F:	Documentation/fb/
 F:	Documentation/devicetree/bindings/fb/
@@ -4447,6 +4448,16 @@ S:	Maintained
 F:	drivers/scsi/*iscsi*
 F:	include/scsi/*iscsi*

+ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
+M:	Or Gerlitz <ogerlitz@mellanox.com>
+M:	Roi Dayan <roid@mellanox.com>
+L:	linux-rdma@vger.kernel.org
+S:	Supported
+W:	http://www.openfabrics.org
+W:	www.open-iscsi.org
+Q:	http://patchwork.kernel.org/project/linux-rdma/list/
+F:	drivers/infiniband/ulp/iser
+
 ISDN SUBSYSTEM
 M:	Karsten Keil <isdn@linux-pingi.de>
 L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -5755,7 +5766,7 @@ M:	Matthew Wilcox <willy@linux.intel.com>
 L:	linux-nvme@lists.infradead.org
 T:	git git://git.infradead.org/users/willy/linux-nvme.git
 S:	Supported
-F:	drivers/block/nvme.c
+F:	drivers/block/nvme*
 F:	include/linux/nvme.h

 OMAP SUPPORT
@@ -6087,7 +6098,15 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 S:	Maintained
 F:	arch/parisc/
+F:	Documentation/parisc/
 F:	drivers/parisc/
+F:	drivers/char/agp/parisc-agp.c
+F:	drivers/input/serio/gscps2.c
+F:	drivers/parport/parport_gsc.*
+F:	drivers/tty/serial/8250/8250_gsc.c
+F:	drivers/video/sti*
+F:	drivers/video/console/sti*
+F:	drivers/video/logo/logo_parisc*

 PC87360 HARDWARE MONITORING DRIVER
 M:	Jim Cromie <jim.cromie@gmail.com>
@@ -7605,7 +7624,7 @@ F:	drivers/clk/spear/
 SPI SUBSYSTEM
 M:	Mark Brown <broonie@kernel.org>
 M:	Grant Likely <grant.likely@linaro.org>
-L:	spi-devel-general@lists.sourceforge.net
+L:	linux-spi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
 Q:	http://patchwork.kernel.org/project/spi-devel-general/list/
 S:	Maintained
@@ -8985,7 +9004,7 @@ S:	Maintained
 F:	drivers/net/wireless/wl3501*

 WM97XX TOUCHSCREEN DRIVERS
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 M:	Liam Girdwood <lrg@slimlogic.co.uk>
 L:	linux-input@vger.kernel.org
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-touch
@@ -8995,7 +9014,6 @@ F:	drivers/input/touchscreen/*wm97*
 F:	include/linux/wm97xx.h

 WOLFSON MICROELECTRONICS DRIVERS
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
 L:	patches@opensource.wolfsonmicro.com
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-asoc
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-audioplus

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
 NAME = Unicycling Gorilla

 # *DOCUMENTATION*

+ 1 - 1
arch/arm/boot/compressed/Makefile

@@ -124,7 +124,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif

 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all -DZIMAGE
+asflags-y := -DZIMAGE

 # Supply kernel BSS size to the decompressor via a linker symbol.
 KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \

+ 28 - 0
arch/arm/boot/compressed/debug.S

@@ -1,6 +1,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>

+#ifndef CONFIG_DEBUG_SEMIHOSTING
+
 #include CONFIG_DEBUG_LL_INCLUDE

 ENTRY(putc)
@@ -10,3 +12,29 @@ ENTRY(putc)
 	busyuart r3, r1
 	mov	 pc, lr
 ENDPROC(putc)
+
+#else
+
+ENTRY(putc)
+	adr	r1, 1f
+	ldmia	r1, {r2, r3}
+	add	r2, r2, r1
+	ldr	r1, [r2, r3]
+	strb	r0, [r1]
+	mov	r0, #0x03		@ SYS_WRITEC
+   ARM(	svc	#0x123456	)
+ THUMB(	svc	#0xab		)
+	mov	pc, lr
+	.align	2
+1:	.word	_GLOBAL_OFFSET_TABLE_ - .
+	.word	semi_writec_buf(GOT)
+ENDPROC(putc)
+
+	.bss
+	.global	semi_writec_buf
+	.type   semi_writec_buf, %object
+semi_writec_buf:
+	.space	4
+	.size	semi_writec_buf, 4
+
+#endif

+ 1 - 0
arch/arm/boot/compressed/head-sa1100.S

@@ -11,6 +11,7 @@
 #include <asm/mach-types.h>

 		.section        ".start", "ax"
+		.arch	armv4

 __SA1100_start:


+ 1 - 0
arch/arm/boot/compressed/head-shark.S

@@ -18,6 +18,7 @@
 	
 		.section	".start", "ax"

+		.arch armv4
 		b	__beginning
 	
 __ofw_data:	.long	0				@ the number of memory blocks

+ 3 - 2
arch/arm/boot/compressed/head.S

@@ -11,6 +11,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>

+	.arch	armv7-a
 /*
  * Debugging stuff
  *
@@ -805,8 +806,8 @@ call_cache_fn:	adr	r12, proc_types
 		.align	2
 		.type	proc_types,#object
 proc_types:
-		.word	0x00000000		@ old ARM ID
-		.word	0x0000f000
+		.word	0x41000000		@ old ARM ID
+		.word	0xff00f000
 		mov	pc, lr
  THUMB(		nop				)
 		mov	pc, lr

+ 2 - 2
arch/arm/boot/dts/am33xx.dtsi

@@ -409,8 +409,8 @@
 			ti,hwmods = "gpmc";
 			reg = <0x50000000 0x2000>;
 			interrupts = <100>;
-			num-cs = <7>;
-			num-waitpins = <2>;
+			gpmc,num-cs = <7>;
+			gpmc,num-waitpins = <2>;
 			#address-cells = <2>;
 			#size-cells = <1>;
 			status = "disabled";

+ 3 - 2
arch/arm/boot/dts/armada-xp-gp.dts

@@ -39,8 +39,9 @@
 	};

 	soc {
-		ranges = <0          0 0xd0000000 0x100000
-			  0xf0000000 0 0xf0000000 0x1000000>;
+		ranges = <0          0 0xd0000000 0x100000  /* Internal registers 1MiB */
+			  0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
+			  0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB  */>;

 		internal-regs {
 			serial@12000 {

+ 3 - 2
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts

@@ -27,8 +27,9 @@
 	};

 	soc {
-		ranges = <0          0 0xd0000000 0x100000
-			  0xf0000000 0 0xf0000000 0x8000000>;
+		ranges = <0          0 0xd0000000 0x100000	/* Internal registers 1MiB */
+			  0xe0000000 0 0xe0000000 0x8100000     /* PCIe */
+			  0xf0000000 0 0xf0000000 0x8000000     /* Device Bus, NOR 128MiB   */>;

 		internal-regs {
 			serial@12000 {

+ 1 - 0
arch/arm/boot/dts/bcm2835.dtsi

@@ -44,6 +44,7 @@
 			reg = <0x7e201000 0x1000>;
 			interrupts = <2 25>;
 			clock-frequency = <3000000>;
+			arm,primecell-periphid = <0x00241011>;
 		};

 		gpio: gpio {

+ 6 - 6
arch/arm/boot/dts/imx25.dtsi

@@ -141,8 +141,8 @@
 				#size-cells = <0>;
 				compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
 				reg = <0x43fa4000 0x4000>;
-				clocks = <&clks 62>;
-				clock-names = "ipg";
+				clocks = <&clks 62>, <&clks 62>;
+				clock-names = "ipg", "per";
 				interrupts = <14>;
 				status = "disabled";
 			};
@@ -182,8 +182,8 @@
 				compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
 				reg = <0x50004000 0x4000>;
 				interrupts = <0>;
-				clocks = <&clks 80>;
-				clock-names = "ipg";
+				clocks = <&clks 80>, <&clks 80>;
+				clock-names = "ipg", "per";
 				status = "disabled";
 			};

@@ -210,8 +210,8 @@
 				#size-cells = <0>;
 				compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
 				reg = <0x50010000 0x4000>;
-				clocks = <&clks 79>;
-				clock-names = "ipg";
+				clocks = <&clks 79>, <&clks 79>;
+				clock-names = "ipg", "per";
 				interrupts = <13>;
 				status = "disabled";
 			};

+ 3 - 3
arch/arm/boot/dts/imx27.dtsi

@@ -131,7 +131,7 @@
 				compatible = "fsl,imx27-cspi";
 				reg = <0x1000e000 0x1000>;
 				interrupts = <16>;
-				clocks = <&clks 53>, <&clks 0>;
+				clocks = <&clks 53>, <&clks 53>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -142,7 +142,7 @@
 				compatible = "fsl,imx27-cspi";
 				reg = <0x1000f000 0x1000>;
 				interrupts = <15>;
-				clocks = <&clks 52>, <&clks 0>;
+				clocks = <&clks 52>, <&clks 52>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -223,7 +223,7 @@
 				compatible = "fsl,imx27-cspi";
 				reg = <0x10017000 0x1000>;
 				interrupts = <6>;
-				clocks = <&clks 51>, <&clks 0>;
+				clocks = <&clks 51>, <&clks 51>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};

+ 1 - 1
arch/arm/boot/dts/imx51.dtsi

@@ -631,7 +631,7 @@
 				compatible = "fsl,imx51-cspi", "fsl,imx35-cspi";
 				reg = <0x83fc0000 0x4000>;
 				interrupts = <38>;
-				clocks = <&clks 55>, <&clks 0>;
+				clocks = <&clks 55>, <&clks 55>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};

+ 1 - 1
arch/arm/boot/dts/imx53.dtsi

@@ -714,7 +714,7 @@
 				compatible = "fsl,imx53-cspi", "fsl,imx35-cspi";
 				reg = <0x63fc0000 0x4000>;
 				interrupts = <38>;
-				clocks = <&clks 55>, <&clks 0>;
+				clocks = <&clks 55>, <&clks 55>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};

+ 20 - 0
arch/arm/boot/dts/omap4-panda-common.dtsi

@@ -56,9 +56,23 @@
 	};
 };

+&omap4_pmx_wkup {
+	pinctrl-names = "default";
+	pinctrl-0 = <
+			&twl6030_wkup_pins
+	>;
+
+	twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+		pinctrl-single,pins = <
+			0x14 0x2        /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+		>;
+	};
+};
+

 &omap4_pmx_core {
 	pinctrl-names = "default";
 	pinctrl-0 = <
+			&twl6030_pins
 			&twl6040_pins
 			&mcpdm_pins
 			&mcbsp1_pins
@@ -66,6 +80,12 @@
 			&tpd12s015_pins
 	>;

+	twl6030_pins: pinmux_twl6030_pins {
+		pinctrl-single,pins = <
+			0x15e 0x4118	/* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+		>;
+	};
+
 	twl6040_pins: pinmux_twl6040_pins {
 		pinctrl-single,pins = <
 			0xe0 0x3	/* hdq_sio.gpio_127 OUTPUT | MODE3 */

+ 20 - 0
arch/arm/boot/dts/omap4-sdp.dts

@@ -142,9 +142,23 @@
 	};
 };

+&omap4_pmx_wkup {
+	pinctrl-names = "default";
+	pinctrl-0 = <
+			&twl6030_wkup_pins
+	>;
+
+	twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+		pinctrl-single,pins = <
+			0x14 0x2        /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+		>;
+	};
+};
+

 &omap4_pmx_core {
 	pinctrl-names = "default";
 	pinctrl-0 = <
+			&twl6030_pins
 			&twl6040_pins
 			&mcpdm_pins
 			&dmic_pins
@@ -179,6 +193,12 @@
 		>;
 	};

+	twl6030_pins: pinmux_twl6030_pins {
+		pinctrl-single,pins = <
+			0x15e 0x4118	/* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+		>;
+	};
+
 	twl6040_pins: pinmux_twl6040_pins {
 		pinctrl-single,pins = <
 			0xe0 0x3	/* hdq_sio.gpio_127 OUTPUT | MODE3 */

+ 3 - 0
arch/arm/boot/dts/omap5.dtsi

@@ -538,6 +538,7 @@
 			interrupts = <0 41 0x4>;
 			ti,hwmods = "timer5";
 			ti,timer-dsp;
+			ti,timer-pwm;
 		};

 		timer6: timer@4013a000 {
@@ -574,6 +575,7 @@
 			reg = <0x4803e000 0x80>;
 			interrupts = <0 45 0x4>;
 			ti,hwmods = "timer9";
+			ti,timer-pwm;
 		};

 		timer10: timer@48086000 {
@@ -581,6 +583,7 @@
 			reg = <0x48086000 0x80>;
 			interrupts = <0 46 0x4>;
 			ti,hwmods = "timer10";
+			ti,timer-pwm;
 		};

 		timer11: timer@48088000 {

+ 9 - 2
arch/arm/include/asm/percpu.h

@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
 static inline unsigned long __my_cpu_offset(void)
 {
 	unsigned long off;
-	/* Read TPIDRPRW */
-	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+	register unsigned long *sp asm ("sp");
+
+	/*
+	 * Read TPIDRPRW.
+	 * We want to allow caching the value, so avoid using volatile and
+	 * instead use a fake stack read to hazard against barrier().
+	 */
+	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
 	return off;
 }
 #define __my_cpu_offset __my_cpu_offset()

+ 4 - 23
arch/arm/include/asm/tlb.h

@@ -33,18 +33,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>

-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)	0
-#else
-#define tlb_fast_mode(tlb)	1
-#endif
-
 #define MMU_GATHER_BUNDLE	8

 /*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-		if (tlb->pages == tlb->local)
-			__tlb_alloc_page(tlb);
-	}
+	free_pages_and_swap_cache(tlb->pages, tlb->nr);
+	tlb->nr = 0;
+	if (tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
 }

 static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)

 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	tlb->pages[tlb->nr++] = page;
 	VM_BUG_ON(tlb->nr > tlb->max);
 	return tlb->max - tlb->nr;

+ 2 - 0
arch/arm/kernel/topology.c

@@ -13,6 +13,7 @@

 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/node.h>
@@ -200,6 +201,7 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
  * cpu topology table
  */
 struct cputopo_arm cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);

 const struct cpumask *cpu_coregroup_mask(int cpu)
 {

+ 13 - 2
arch/arm/kvm/arm.c

@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
 	wait_event_interruptible(*wq, !vcpu->arch.pause);
 }

+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.target >= 0;
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:	The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int ret;
 	sigset_t sigsaved;

-	/* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
-	if (unlikely(vcpu->arch.target < 0))
+	if (unlikely(!kvm_vcpu_initialized(vcpu)))
 		return -ENOEXEC;

 	ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	case KVM_SET_ONE_REG:
 	case KVM_GET_ONE_REG: {
 		struct kvm_one_reg reg;
+
+		if (unlikely(!kvm_vcpu_initialized(vcpu)))
+			return -ENOEXEC;
+
 		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
 		if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		struct kvm_reg_list reg_list;
 		unsigned n;

+		if (unlikely(!kvm_vcpu_initialized(vcpu)))
+			return -ENOEXEC;
+
 		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
 			return -EFAULT;
 		n = reg_list.n;

+ 26 - 15
arch/arm/kvm/mmu.c

@@ -43,7 +43,14 @@ static phys_addr_t hyp_idmap_vector;

 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+	/*
+	 * This function also gets called when dealing with HYP page
+	 * tables. As HYP doesn't have an associated struct kvm (and
+	 * the HYP page tables are fairly static), we don't do
+	 * anything there.
+	 */
+	if (kvm)
+		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }

 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -78,18 +85,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }

-static void clear_pud_entry(pud_t *pud)
+static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
 	pmd_t *pmd_table = pmd_offset(pud, 0);
 	pud_clear(pud);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
 	pmd_free(NULL, pmd_table);
 	put_page(virt_to_page(pud));
 }

-static void clear_pmd_entry(pmd_t *pmd)
+static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
 	pte_t *pte_table = pte_offset_kernel(pmd, 0);
 	pmd_clear(pmd);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
 	pte_free_kernel(NULL, pte_table);
 	put_page(virt_to_page(pmd));
 }
@@ -100,11 +109,12 @@ static bool pmd_empty(pmd_t *pmd)
 	return page_count(pmd_page) == 1;
 }

-static void clear_pte_entry(pte_t *pte)
+static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 {
 	if (pte_present(*pte)) {
 		kvm_set_pte(pte, __pte(0));
 		put_page(virt_to_page(pte));
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	}
 }

@@ -114,7 +124,8 @@ static bool pte_empty(pte_t *pte)
 	return page_count(pte_page) == 1;
 }

-static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+			unsigned long long start, u64 size)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -138,15 +149,15 @@ static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
 		}

 		pte = pte_offset_kernel(pmd, addr);
-		clear_pte_entry(pte);
+		clear_pte_entry(kvm, pte, addr);
 		range = PAGE_SIZE;

 		/* If we emptied the pte, walk back up the ladder */
 		if (pte_empty(pte)) {
-			clear_pmd_entry(pmd);
+			clear_pmd_entry(kvm, pmd, addr);
 			range = PMD_SIZE;
 			if (pmd_empty(pmd)) {
-				clear_pud_entry(pud);
+				clear_pud_entry(kvm, pud, addr);
 				range = PUD_SIZE;
 			}
 		}
@@ -165,14 +176,14 @@ void free_boot_hyp_pgd(void)
 	mutex_lock(&kvm_hyp_pgd_mutex);

 	if (boot_hyp_pgd) {
-		unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-		unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 		kfree(boot_hyp_pgd);
 		boot_hyp_pgd = NULL;
 	}

 	if (hyp_pgd)
-		unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

 	kfree(init_bounce_page);
 	init_bounce_page = NULL;
@@ -200,9 +211,10 @@ void free_hyp_pgds(void)

 	if (hyp_pgd) {
 		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+
 		kfree(hyp_pgd);
 		hyp_pgd = NULL;
 	}
@@ -393,7 +405,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
  */
 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
-	unmap_range(kvm->arch.pgd, start, size);
+	unmap_range(kvm, kvm->arch.pgd, start, size);
 }

 /**
@@ -675,7 +687,6 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
 	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
-	kvm_tlb_flush_vmid_ipa(kvm, gpa);
 }

 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)

+ 2 - 0
arch/arm/mach-exynos/common.c

@@ -386,6 +386,8 @@ int __init exynos_fdt_map_chipid(unsigned long node, const char *uname,

 void __init exynos_init_io(struct map_desc *mach_desc, int size)
 {
+	debug_ll_io_init();
+
 #ifdef CONFIG_OF
 	if (initial_boot_params)
 		of_scan_flat_dt(exynos_fdt_map_chipid, NULL);

+ 2 - 2
arch/arm/mach-imx/clk-imx6q.c

@@ -181,14 +181,14 @@ static const char *periph_clk2_sels[]	= { "pll3_usb_otg", "osc", "osc", "dummy",
 static const char *periph2_clk2_sels[]	= { "pll3_usb_otg", "pll2_bus", };
 static const char *periph_sels[]	= { "periph_pre", "periph_clk2", };
 static const char *periph2_sels[]	= { "periph2_pre", "periph2_clk2", };
-static const char *axi_sels[]		= { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *axi_sels[]		= { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
 static const char *audio_sels[]	= { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
 static const char *gpu_axi_sels[]	= { "axi", "ahb", };
 static const char *gpu2d_core_sels[]	= { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
 static const char *gpu3d_core_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
 static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
 static const char *ipu_sels[]		= { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[]	= { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
+static const char *ldb_di_sels[]	= { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
 static const char *ipu_di_pre_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
 static const char *ipu1_di0_sels[]	= { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
 static const char *ipu1_di1_sels[]	= { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };

+ 0 - 10
arch/arm/mach-kirkwood/board-ts219.c

@@ -41,13 +41,3 @@ void __init qnap_dt_ts219_init(void)

 	pm_power_off = qnap_tsx1x_power_off;
 }
-
-/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */
-static int __init ts219_pci_init(void)
-{
-	if (machine_is_ts219())
-		kirkwood_pcie_init(KW_PCIE0);
-
-	return 0;
-}
-subsys_initcall(ts219_pci_init);

+ 3 - 2
arch/arm/mach-kirkwood/mpp.c

@@ -22,9 +22,10 @@ static unsigned int __init kirkwood_variant(void)

 	kirkwood_pcie_id(&dev, &rev);

-	if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) ||
-	    (dev == MV88F6282_DEV_ID))
+	if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0)
 		return MPP_F6281_MASK;
+	if (dev == MV88F6282_DEV_ID)
+		return MPP_F6282_MASK;
 	if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0)
 		return MPP_F6192_MASK;
 	if (dev == MV88F6180_DEV_ID)

+ 11 - 5
arch/arm/mach-mvebu/coherency_ll.S

@@ -32,15 +32,21 @@ ENTRY(ll_set_cpu_coherent)

 	/* Add CPU to SMP group - Atomic */
 	add	r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
-	ldr	r2, [r3]
+1:
+	ldrex	r2, [r3]
 	orr	r2, r2, r1
-	str	r2, [r3]
+	strex 	r0, r2, [r3]
+	cmp	r0, #0
+	bne 1b

 	/* Enable coherency on CPU - Atomic */
-	add	r3, r0, #ARMADA_XP_CFB_CFG_REG_OFFSET
-	ldr	r2, [r3]
+	add	r3, r3, #ARMADA_XP_CFB_CFG_REG_OFFSET
+1:
+	ldrex	r2, [r3]
 	orr	r2, r2, r1
-	str	r2, [r3]
+	strex	r0, r2, [r3]
+	cmp	r0, #0
+	bne 1b

 	dsb


+ 9 - 9
arch/arm/mach-omap2/clock36xx.c

@@ -20,11 +20,12 @@

 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/io.h>

 #include "clock.h"
 #include "clock36xx.h"
-
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)

 /**
  * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
@@ -39,29 +40,28 @@
  */
 int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
 {
-	struct clk_hw_omap *parent;
+	struct clk_divider *parent;
 	struct clk_hw *parent_hw;
-	u32 dummy_v, orig_v, clksel_shift;
+	u32 dummy_v, orig_v;
 	int ret;

 	/* Clear PWRDN bit of HSDIVIDER */
 	ret = omap2_dflt_clk_enable(clk);

 	parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
-	parent = to_clk_hw_omap(parent_hw);
+	parent = to_clk_divider(parent_hw);

 	/* Restore the dividers */
 	if (!ret) {
-		clksel_shift = __ffs(parent->clksel_mask);
-		orig_v = __raw_readl(parent->clksel_reg);
+		orig_v = __raw_readl(parent->reg);
 		dummy_v = orig_v;

 		/* Write any other value different from the Read value */
-		dummy_v ^= (1 << clksel_shift);
-		__raw_writel(dummy_v, parent->clksel_reg);
+		dummy_v ^= (1 << parent->shift);
+		__raw_writel(dummy_v, parent->reg);

 		/* Write the original divider */
-		__raw_writel(orig_v, parent->clksel_reg);
+		__raw_writel(orig_v, parent->reg);
 	}

 	return ret;

+ 8 - 1
arch/arm/mach-omap2/omap_hwmod_33xx_data.c

@@ -2007,6 +2007,13 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
 	},
 };

+/* uart2 */
+static struct omap_hwmod_dma_info uart2_edma_reqs[] = {
+	{ .name = "tx",	.dma_req = 28, },
+	{ .name = "rx",	.dma_req = 29, },
+	{ .dma_req = -1 }
+};
+
 static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = {
 	{ .irq = 73 + OMAP_INTC_START, },
 	{ .irq = -1 },
@@ -2018,7 +2025,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = {
 	.clkdm_name	= "l4ls_clkdm",
 	.flags		= HWMOD_SWSUP_SIDLE_ACT,
 	.mpu_irqs	= am33xx_uart2_irqs,
-	.sdma_reqs	= uart1_edma_reqs,
+	.sdma_reqs	= uart2_edma_reqs,
 	.main_clk	= "dpll_per_m2_div4_ck",
 	.prcm		= {
 		.omap4	= {

+ 4 - 2
arch/arm/mach-omap2/pm34xx.c

@@ -546,8 +546,10 @@ static void __init prcm_setup_regs(void)
 	/* Clear any pending PRCM interrupts */
 	omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

-	if (omap3_has_iva())
-		omap3_iva_idle();
+	/*
+	 * We need to idle iva2_pwrdm even on am3703 with no iva2.
+	 */
+	omap3_iva_idle();

 	omap3_d2d_idle();
 }

+ 4 - 2
arch/arm/mach-prima2/pm.c

@@ -101,8 +101,10 @@ static int __init sirfsoc_of_pwrc_init(void)
 	struct device_node *np;

 	np = of_find_matching_node(NULL, pwrc_ids);
-	if (!np)
-		panic("unable to find compatible pwrc node in dtb\n");
+	if (!np) {
+		pr_err("unable to find compatible sirf pwrc node in dtb\n");
+		return -ENOENT;
+	}

 	/*
 	 * pwrc behind rtciobrg is not located in memory space

+ 4 - 2
arch/arm/mach-prima2/rstc.c

@@ -28,8 +28,10 @@ static int __init sirfsoc_of_rstc_init(void)
 	struct device_node *np;

 	np = of_find_matching_node(NULL, rstc_ids);
-	if (!np)
-		panic("unable to find compatible rstc node in dtb\n");
+	if (!np) {
+		pr_err("unable to find compatible sirf rstc node in dtb\n");
+		return -ENOENT;
+	}

 	sirfsoc_rstc_base = of_iomap(np, 0);
 	if (!sirfsoc_rstc_base)

+ 1 - 1
arch/arm/mach-shmobile/setup-sh73a0.c

@@ -252,7 +252,7 @@ static struct sh_timer_config cmt10_platform_data = {
 	.name = "CMT10",
 	.channel_offset = 0x10,
 	.timer_bit = 0,
-	.clockevent_rating = 125,
+	.clockevent_rating = 80,
 	.clocksource_rating = 125,
 };


+ 3 - 0
arch/arm/mach-ux500/board-mop500-regulators.c

@@ -374,6 +374,7 @@ static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
 static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
 	/* supplies to the display/camera */
 	[AB8500_LDO_AUX1] = {
+		.supply_regulator = "ab8500-ext-supply3",
 		.constraints = {
 			.name = "V-DISPLAY",
 			.min_uV = 2800000,
@@ -387,6 +388,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
 	},
 	/* supplies to the on-board eMMC */
 	[AB8500_LDO_AUX2] = {
+		.supply_regulator = "ab8500-ext-supply3",
 		.constraints = {
 			.name = "V-eMMC1",
 			.min_uV = 1100000,
@@ -402,6 +404,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
 	},
 	/* supply for VAUX3, supplies to SDcard slots */
 	[AB8500_LDO_AUX3] = {
+		.supply_regulator = "ab8500-ext-supply3",
 		.constraints = {
 			.name = "V-MMC-SD",
 			.min_uV = 1100000,

+ 4 - 0
arch/arm/mach-ux500/cpuidle.c

@@ -21,6 +21,7 @@
 #include <asm/proc-fns.h>

 #include "db8500-regs.h"
+#include "id.h"

 static atomic_t master = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(master_lock);
@@ -114,6 +115,9 @@ static struct cpuidle_driver ux500_idle_driver = {

 int __init ux500_idle_init(void)
 {
+	if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
+		return -ENODEV;
+
 	/* Configure wake up reasons */
 	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
 			     PRCMU_WAKEUP(ABB));

+ 9 - 1
arch/arm/plat-samsung/include/plat/uncompress.h

@@ -66,6 +66,9 @@ uart_rd(unsigned int reg)

 static void putc(int ch)
 {
+	if (!config_enabled(CONFIG_DEBUG_LL))
+		return;
+
 	if (uart_rd(S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) {
 		int level;

@@ -118,7 +121,12 @@ static void arch_decomp_error(const char *x)
 #ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO
 static inline void arch_enable_uart_fifo(void)
 {
-	u32 fifocon = uart_rd(S3C2410_UFCON);
+	u32 fifocon;
+
+	if (!config_enabled(CONFIG_DEBUG_LL))
+		return;
+
+	fifocon = uart_rd(S3C2410_UFCON);

 	if (!(fifocon & S3C2410_UFCON_FIFOMODE)) {
 		fifocon |= S3C2410_UFCON_RESETBOTH;

+ 13 - 5
arch/arm/plat-samsung/pm.c

@@ -16,6 +16,7 @@
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 #include <linux/serial_core.h>
 #include <linux/io.h>

@@ -261,7 +262,8 @@ static int s3c_pm_enter(suspend_state_t state)
 	 * require a full power-cycle)
 	*/

-	if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
+	if (!of_have_populated_dt() &&
+	    !any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
 	    !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
 		printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
 		printk(KERN_ERR "%s: Aborting sleep\n", __func__);
@@ -270,8 +272,11 @@ static int s3c_pm_enter(suspend_state_t state)

 	/* save all necessary core registers not covered by the drivers */

-	samsung_pm_save_gpios();
-	samsung_pm_saved_gpios();
+	if (!of_have_populated_dt()) {
+		samsung_pm_save_gpios();
+		samsung_pm_saved_gpios();
+	}
+
 	s3c_pm_save_uarts();
 	s3c_pm_save_core();

@@ -310,8 +315,11 @@ static int s3c_pm_enter(suspend_state_t state)

 	s3c_pm_restore_core();
 	s3c_pm_restore_uarts();
-	samsung_pm_restore_gpios();
-	s3c_pm_restored_gpios();
+
+	if (!of_have_populated_dt()) {
+		samsung_pm_restore_gpios();
+		s3c_pm_restored_gpios();
+	}

 	s3c_pm_debug_init();


+ 1 - 0
arch/arm64/kernel/arm64ksyms.c

@@ -34,6 +34,7 @@ EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__strncpy_from_user);

 EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);

 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);

+ 10 - 0
arch/arm64/kernel/entry.S

@@ -390,6 +390,16 @@ el0_sync_compat:
 	b.eq	el0_fpsimd_exc
 	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
 	b.eq	el0_undef
+	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
+	b.eq	el0_undef
+	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
+	b.eq	el0_undef
+	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
+	b.eq	el0_undef
+	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
+	b.eq	el0_undef
+	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
+	b.eq	el0_undef
 	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
 	b.ge	el0_dbg
 	b	el0_inv

+ 12 - 5
arch/arm64/kernel/traps.c

@@ -267,7 +267,8 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 		return;
 #endif
 
-	if (show_unhandled_signals) {
+	if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
+	    printk_ratelimit()) {
 		pr_info("%s[%d]: undefined instruction: pc=%p\n",
 			current->comm, task_pid_nr(current), pc);
 		dump_instr(KERN_INFO, regs);
@@ -294,7 +295,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
 	}
 #endif
 
-	if (show_unhandled_signals) {
+	if (show_unhandled_signals && printk_ratelimit()) {
 		pr_info("%s[%d]: syscall %d\n", current->comm,
 			task_pid_nr(current), (int)regs->syscallno);
 		dump_instr("", regs);
@@ -310,14 +311,20 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
  */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
+	siginfo_t info;
+	void __user *pc = (void __user *)instruction_pointer(regs);
 	console_verbose();
 
 	pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
 		handler[reason], esr);
+	__show_regs(regs);
+
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code  = ILL_ILLOPC;
+	info.si_addr  = pc;
 
 
-	die("Oops - bad mode", regs, 0);
-	local_irq_disable();
-	panic("bad mode");
+	arm64_notify_die("Oops - bad mode", regs, &info, 0);
 }
 
 void __pte_error(const char *file, int line, unsigned long val)

+ 2 - 1
arch/arm64/mm/fault.c

@@ -113,7 +113,8 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
 	struct siginfo si;
 
-	if (show_unhandled_signals) {
+	if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
+	    printk_ratelimit()) {
 		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
 			tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
 			addr, esr);

+ 8 - 33
arch/ia64/include/asm/tlb.h

@@ -46,12 +46,6 @@
 #include <asm/tlbflush.h>
 #include <asm/machvec.h>
 
-#ifdef CONFIG_SMP
-# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
-#else
-# define tlb_fast_mode(tlb)	(1)
-#endif
-
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -60,7 +54,7 @@
 
 
 struct mmu_gather {
 	struct mm_struct	*mm;
-	unsigned int		nr;		/* == ~0U => fast mode */
+	unsigned int		nr;
 	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 static inline void
 ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
+	unsigned long i;
 	unsigned int nr;
 
 	if (!tlb->need_flush)
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 
 
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
-	if (!tlb_fast_mode(tlb)) {
-		unsigned long i;
-		tlb->nr = 0;
-		tlb->start_addr = ~0UL;
-		for (i = 0; i < nr; ++i)
-			free_page_and_swap_cache(tlb->pages[i]);
-	}
+
+	tlb->nr = 0;
+	tlb->start_addr = ~0UL;
+	for (i = 0; i < nr; ++i)
+		free_page_and_swap_cache(tlb->pages[i]);
 }
 
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
@@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m
 	tlb->mm = mm;
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
 	tlb->pages = tlb->local;
-	/*
-	 * Use fast mode if only 1 CPU is online.
-	 *
-	 * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
-	 * doesn't work because of speculative accesses and software prefetching: the page
-	 * table of "mm" may (and usually is) the currently active page table and even
-	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
-	 * compiler might use speculation or lfetch.fault on what happens to be a valid
-	 * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
-	 * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
-	 * problems.  (We could make fast-mode work by switching the current task to a
-	 * different "mm" during the shootdown.) --davidm 08/02/2002
-	 */
-	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+	tlb->nr = 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
 	tlb->start_addr = ~0UL;
 }
 }
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	if (!tlb->nr && tlb->pages == tlb->local)
 	if (!tlb->nr && tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 		__tlb_alloc_page(tlb);
 
 

+ 2 - 1
arch/m68k/include/asm/gpio.h

@@ -86,6 +86,7 @@ static inline int gpio_cansleep(unsigned gpio)
 	return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
 }
 
+#ifndef CONFIG_GPIOLIB
 static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
 {
 	int err;
@@ -105,5 +106,5 @@ static inline int gpio_request_one(unsigned gpio, unsigned long flags, const cha
 
 
 	return err;
 }
-
+#endif /* !CONFIG_GPIOLIB */
 #endif

+ 19 - 10
arch/m68k/kernel/head.S

@@ -2752,11 +2752,9 @@ func_return	get_new_page
 #ifdef CONFIG_MAC
 
 L(scc_initable_mac):
-	.byte	9,12		/* Reset */
 	.byte	4,0x44		/* x16, 1 stopbit, no parity */
 	.byte	3,0xc0		/* receiver: 8 bpc */
 	.byte	5,0xe2		/* transmitter: 8 bpc, assert dtr/rts */
-	.byte	9,0		/* no interrupts */
 	.byte	10,0		/* NRZ */
 	.byte	11,0x50		/* use baud rate generator */
 	.byte	12,1,13,0	/* 38400 baud */
@@ -2899,6 +2897,7 @@ func_start	serial_init,%d0/%d1/%a0/%a1
 	is_not_mac(L(serial_init_not_mac))
 
 #ifdef SERIAL_DEBUG
+
 /* You may define either or both of these. */
 #define MAC_USE_SCC_A /* Modem port */
 #define MAC_USE_SCC_B /* Printer port */
@@ -2908,9 +2907,21 @@ func_start	serial_init,%d0/%d1/%a0/%a1
 #define mac_scc_cha_b_data_offset	0x4
 #define mac_scc_cha_a_data_offset	0x6
 
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
+	movel	%pc@(L(mac_sccbase)),%a0
+	/* Reset SCC device */
+	moveb	#9,%a0@(mac_scc_cha_a_ctrl_offset)
+	moveb	#0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
+	/* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
+	/* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */
+	movel	#35,%d0
+5:
+	subq	#1,%d0
+	jne	5b
+#endif
+
 #ifdef MAC_USE_SCC_A
 	/* Initialize channel A */
-	movel	%pc@(L(mac_sccbase)),%a0
 	lea	%pc@(L(scc_initable_mac)),%a1
 5:	moveb	%a1@+,%d0
 	jmi	6f
@@ -2922,9 +2933,6 @@ func_start	serial_init,%d0/%d1/%a0/%a1
 
 
 #ifdef MAC_USE_SCC_B
 	/* Initialize channel B */
-#ifndef MAC_USE_SCC_A	/* Load mac_sccbase only if needed */
-	movel	%pc@(L(mac_sccbase)),%a0
-#endif	/* MAC_USE_SCC_A */
 	lea	%pc@(L(scc_initable_mac)),%a1
 7:	moveb	%a1@+,%d0
 	jmi	8f
@@ -2933,6 +2941,7 @@ func_start	serial_init,%d0/%d1/%a0/%a1
 	jra	7b
 8:
 #endif	/* MAC_USE_SCC_B */
+
 #endif	/* SERIAL_DEBUG */
 
 	jra	L(serial_init_done)
@@ -3006,17 +3015,17 @@ func_start	serial_putc,%d0/%d1/%a0/%a1
 
 
 #ifdef SERIAL_DEBUG
 
-#ifdef MAC_USE_SCC_A
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
 	movel	%pc@(L(mac_sccbase)),%a1
+#endif
+
+#ifdef MAC_USE_SCC_A
 3:	btst	#2,%a1@(mac_scc_cha_a_ctrl_offset)
 	jeq	3b
 	moveb	%d0,%a1@(mac_scc_cha_a_data_offset)
 #endif	/* MAC_USE_SCC_A */
 
 
 #ifdef MAC_USE_SCC_B
-#ifndef MAC_USE_SCC_A	/* Load mac_sccbase only if needed */
-	movel	%pc@(L(mac_sccbase)),%a1
-#endif	/* MAC_USE_SCC_A */
 4:	btst	#2,%a1@(mac_scc_cha_b_ctrl_offset)
 	jeq	4b
 	moveb	%d0,%a1@(mac_scc_cha_b_data_offset)

+ 18 - 16
arch/microblaze/include/asm/cacheflush.h

@@ -102,21 +102,23 @@ do { \
 
 
 #define flush_cache_range(vma, start, len) do { } while (0)
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
-do {									\
-	u32 addr = virt_to_phys(dst);					\
-	memcpy((dst), (src), (len));					\
-	if (vma->vm_flags & VM_EXEC) {					\
-		invalidate_icache_range((unsigned) (addr),		\
-					(unsigned) (addr) + PAGE_SIZE);	\
-		flush_dcache_range((unsigned) (addr),			\
-					(unsigned) (addr) + PAGE_SIZE);	\
-	}								\
-} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
-do {									\
-	memcpy((dst), (src), (len));					\
-} while (0)
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+				     struct page *page, unsigned long vaddr,
+				     void *dst, void *src, int len)
+{
+	u32 addr = virt_to_phys(dst);
+	memcpy(dst, src, len);
+	if (vma->vm_flags & VM_EXEC) {
+		invalidate_icache_range(addr, addr + PAGE_SIZE);
+		flush_dcache_range(addr, addr + PAGE_SIZE);
+	}
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+				       struct page *page, unsigned long vaddr,
+				       void *dst, void *src, int len)
+{
+	memcpy(dst, src, len);
+}
 
 
 #endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */

+ 2 - 2
arch/microblaze/include/asm/uaccess.h

@@ -99,13 +99,13 @@ static inline int access_ok(int type, const void __user *addr,
 	if ((get_fs().seg < ((unsigned long)addr)) ||
 			(get_fs().seg < ((unsigned long)addr + size - 1))) {
 		pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
-			type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+			type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
 			(u32)get_fs().seg);
 		return 0;
 	}
 ok:
 	pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
-			type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+			type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
 			(u32)get_fs().seg);
 	return 1;
 }

+ 9 - 6
arch/mips/cavium-octeon/setup.c

@@ -428,13 +428,16 @@ static void octeon_restart(char *command)
  */
 static void octeon_kill_core(void *arg)
 {
-	mb();
-	if (octeon_is_simulation()) {
-		/* The simulator needs the watchdog to stop for dead cores */
-		cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+	if (octeon_is_simulation())
 		/* A break instruction causes the simulator stop a core */
-		asm volatile ("sync\nbreak");
-	}
+		asm volatile ("break" ::: "memory");
+
+	local_irq_disable();
+	/* Disable watchdog on this core. */
+	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+	/* Spin in a low power mode. */
+	while (true)
+		asm volatile ("wait" ::: "memory");
 }
 
 
 
 

+ 0 - 4
arch/mips/include/asm/kvm_host.h

@@ -496,10 +496,6 @@ struct kvm_mips_callbacks {
 			    uint32_t cause);
 	int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
 			  uint32_t cause);
-	int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
-				    struct kvm_regs *regs);
-	int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
-				    struct kvm_regs *regs);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

+ 1 - 1
arch/mips/include/asm/mmu_context.h

@@ -117,7 +117,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	if (! ((asid += ASID_INC) & ASID_MASK) ) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
-#ifdef CONFIG_VIRTUALIZATION
+#ifdef CONFIG_KVM
 		kvm_local_flush_tlb_all();      /* start new asid cycle */
 #else
 		local_flush_tlb_all();	/* start new asid cycle */

+ 32 - 0
arch/mips/include/asm/ptrace.h

@@ -16,6 +16,38 @@
 #include <asm/isadep.h>
 #include <uapi/asm/ptrace.h>
 
+/*
+ * This struct defines the way the registers are stored on the stack during a
+ * system call/exception. As usual the registers k0/k1 aren't being saved.
+ */
+struct pt_regs {
+#ifdef CONFIG_32BIT
+	/* Pad bytes for argument save space on the stack. */
+	unsigned long pad0[6];
+#endif
+
+	/* Saved main processor registers. */
+	unsigned long regs[32];
+
+	/* Saved special registers. */
+	unsigned long cp0_status;
+	unsigned long hi;
+	unsigned long lo;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+	unsigned long acx;
+#endif
+	unsigned long cp0_badvaddr;
+	unsigned long cp0_cause;
+	unsigned long cp0_epc;
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long cp0_tcstatus;
+#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	unsigned long long mpl[3];	  /* MTM{0,1,2} */
+	unsigned long long mtp[3];	  /* MTP{0,1,2} */
+#endif
+} __aligned(8);
+
 struct task_struct;
 
 extern int ptrace_getregs(struct task_struct *child, __s64 __user *data);

+ 107 - 27
arch/mips/include/uapi/asm/kvm.h

@@ -1,55 +1,135 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Cavium, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 
 #ifndef __LINUX_KVM_MIPS_H
 #define __LINUX_KVM_MIPS_H
 
 #include <linux/types.h>
 
-#define __KVM_MIPS
-
-#define N_MIPS_COPROC_REGS      32
-#define N_MIPS_COPROC_SEL   	8
+/*
+ * KVM MIPS specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
 
 
-/* for KVM_GET_REGS and KVM_SET_REGS */
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ *
+ * If Config[AT] is zero (32-bit CPU), the register contents are
+ * stored in the lower 32-bits of the struct kvm_regs fields and sign
+ * extended to 64-bits.
+ */
 struct kvm_regs {
-	__u32 gprs[32];
-	__u32 hi;
-	__u32 lo;
-	__u32 pc;
-
-	__u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
-};
-
-/* for KVM_GET_SREGS and KVM_SET_SREGS */
-struct kvm_sregs {
+	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+	__u64 gpr[32];
+	__u64 hi;
+	__u64 lo;
+	__u64 pc;
 };
 
-/* for KVM_GET_FPU and KVM_SET_FPU */
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ *
+ * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
+ * are zero filled.
+ */
 struct kvm_fpu {
+	__u64 fpr[32];
+	__u32 fir;
+	__u32 fccr;
+	__u32 fexr;
+	__u32 fenr;
+	__u32 fcsr;
+	__u32 pad;
 };
 
+
+/*
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
+ * registers.  The id field is broken down as follows:
+ *
+ *  bits[2..0]   - Register 'sel' index.
+ *  bits[7..3]   - Register 'rd'  index.
+ *  bits[15..8]  - Must be zero.
+ *  bits[31..16] - 1 -> CP0 registers.
+ *  bits[51..32] - Must be zero.
+ *  bits[63..52] - As per linux/kvm.h
+ *
+ * Other sets registers may be added in the future.  Each set would
+ * have its own identifier in bits[31..16].
+ *
+ * The registers defined in struct kvm_regs are also accessible, the
+ * id values for these are below.
+ */
+
+#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
+
+/*
+ * KVM MIPS specific structures and definitions
+ *
+ */
 struct kvm_debug_exit_arch {
+	__u64 epc;
 };
 
 /* for KVM_SET_GUEST_DEBUG */
 struct kvm_guest_debug_arch {
 };
 
 
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
 struct kvm_mips_interrupt {
 	/* in */
 	__u32 cpu;
 	__u32 irq;
 };
 
 
-/* definition of registers in kvm_run */
-struct kvm_sync_regs {
-};
-
 #endif /* __LINUX_KVM_MIPS_H */

+ 2 - 15
arch/mips/include/uapi/asm/ptrace.h

@@ -22,16 +22,12 @@
 #define DSP_CONTROL	77
 #define ACX		78
 
+#ifndef __KERNEL__
 /*
  * This struct defines the way the registers are stored on the stack during a
  * system call/exception. As usual the registers k0/k1 aren't being saved.
  */
 struct pt_regs {
-#ifdef CONFIG_32BIT
-	/* Pad bytes for argument save space on the stack. */
-	unsigned long pad0[6];
-#endif
-
 	/* Saved main processor registers. */
 	unsigned long regs[32];
 
@@ -39,20 +35,11 @@ struct pt_regs {
 	unsigned long cp0_status;
 	unsigned long hi;
 	unsigned long lo;
-#ifdef CONFIG_CPU_HAS_SMARTMIPS
-	unsigned long acx;
-#endif
 	unsigned long cp0_badvaddr;
 	unsigned long cp0_cause;
 	unsigned long cp0_epc;
-#ifdef CONFIG_MIPS_MT_SMTC
-	unsigned long cp0_tcstatus;
-#endif /* CONFIG_MIPS_MT_SMTC */
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-	unsigned long long mpl[3];	  /* MTM{0,1,2} */
-	unsigned long long mtp[3];	  /* MTP{0,1,2} */
-#endif
 } __attribute__ ((aligned (8)));
+#endif /* __KERNEL__ */
 
 
 /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
 #define PTRACE_GETREGS		12

+ 11 - 0
arch/mips/kernel/binfmt_elfn32.c

@@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+	unsigned long jiffies = cputime_to_jiffies(cputime);
+
+	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+	value->tv_sec = jiffies / HZ;
+}
+
 #include "../../../fs/binfmt_elf.c"

+ 11 - 0
arch/mips/kernel/binfmt_elfo32.c

@@ -162,4 +162,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+	unsigned long jiffies = cputime_to_jiffies(cputime);
+
+	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+	value->tv_sec = jiffies / HZ;
+}
+
 #include "../../../fs/binfmt_elf.c"

+ 4 - 0
arch/mips/kernel/ftrace.c

@@ -25,12 +25,16 @@
 #define MCOUNT_OFFSET_INSNS 4
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
 /* Arch override because MIPS doesn't need to run this from stop_machine() */
 void arch_ftrace_update_code(int command)
 {
 	ftrace_modify_all_code(command);
 }
 
 
+#endif
+
 /*
  * Check if the address is in kernel space
  *

+ 7 - 6
arch/mips/kernel/idle.c

@@ -93,26 +93,27 @@ static void rm7k_wait_irqoff(void)
 }
 
 /*
- * The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter.
- * alchemy/common/time.c may override cpu_wait!
+ * Au1 'wait' is only useful when the 32kHz counter is used as timer,
+ * since coreclock (and the cp0 counter) stops upon executing it. Only an
+ * interrupt can wake it, so they must be enabled before entering idle modes.
  */
 static void au1k_wait(void)
 {
+	unsigned long c0status = read_c0_status() | 1;	/* irqs on */
+
 	__asm__(
 	"	.set	mips3			\n"
 	"	cache	0x14, 0(%0)		\n"
 	"	cache	0x14, 32(%0)		\n"
 	"	sync				\n"
-	"	nop				\n"
+	"	mtc0	%1, $12			\n" /* wr c0status */
 	"	wait				\n"
 	"	nop				\n"
 	"	nop				\n"
 	"	nop				\n"
 	"	nop				\n"
 	"	.set	mips0			\n"
-	: : "r" (au1k_wait));
-	local_irq_enable();
+	: : "r" (au1k_wait), "r" (c0status));
 }
 
 static int __initdata nowait;

+ 1 - 0
arch/mips/kernel/rtlx.c

@@ -40,6 +40,7 @@
 #include <asm/processor.h>
 #include <asm/vpe.h>
 #include <asm/rtlx.h>
+#include <asm/setup.h>
 
 
 static struct rtlx_info *rtlx;
 static int major;

+ 15 - 13
arch/mips/kernel/traps.c

@@ -897,22 +897,24 @@ out_sigsegv:
 
 
 asmlinkage void do_tr(struct pt_regs *regs)
 {
-	unsigned int opcode, tcode = 0;
+	u32 opcode, tcode = 0;
 	u16 instr[2];
-	unsigned long epc = exception_epc(regs);
+	unsigned long epc = msk_isa16_mode(exception_epc(regs));
 
 
-	if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
-		(__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+	if (get_isa16_mode(regs->cp0_epc)) {
+		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
+		    __get_user(instr[1], (u16 __user *)(epc + 2)))
 			goto out_sigsegv;
-	opcode = (instr[0] << 16) | instr[1];
-
-	/* Immediate versions don't provide a code.  */
-	if (!(opcode & OPCODE)) {
-		if (get_isa16_mode(regs->cp0_epc))
-			/* microMIPS */
-			tcode = (opcode >> 12) & 0x1f;
-		else
-			tcode = ((opcode >> 6) & ((1 << 10) - 1));
+		opcode = (instr[0] << 16) | instr[1];
+		/* Immediate versions don't provide a code.  */
+		if (!(opcode & OPCODE))
+			tcode = (opcode >> 12) & ((1 << 4) - 1);
+	} else {
+		if (__get_user(opcode, (u32 __user *)epc))
+			goto out_sigsegv;
+		/* Immediate versions don't provide a code.  */
+		if (!(opcode & OPCODE))
+			tcode = (opcode >> 6) & ((1 << 10) - 1);
 	}
 
 	do_trap_or_bp(regs, tcode, "Trap");

+ 284 - 21
arch/mips/kvm/kvm_mips.c

@@ -195,7 +195,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 long
 kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 {
-	return -EINVAL;
+	return -ENOIOCTLCMD;
 }
 
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
@@ -401,7 +401,7 @@ int
 kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 				    struct kvm_guest_debug *dbg)
 {
-	return -EINVAL;
+	return -ENOIOCTLCMD;
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -475,14 +475,248 @@ int
 kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	return -ENOIOCTLCMD;
 }
 
 int
 kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 				struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	return -ENOIOCTLCMD;
+}
+
+#define MIPS_CP0_32(_R, _S)					\
+	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+
+#define MIPS_CP0_64(_R, _S)					\
+	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+
+#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
+#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
+#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
+#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
+#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
+#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
+#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
+#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
+#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
+#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
+#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
+#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
+
+static u64 kvm_mips_get_one_regs[] = {
+	KVM_REG_MIPS_R0,
+	KVM_REG_MIPS_R1,
+	KVM_REG_MIPS_R2,
+	KVM_REG_MIPS_R3,
+	KVM_REG_MIPS_R4,
+	KVM_REG_MIPS_R5,
+	KVM_REG_MIPS_R6,
+	KVM_REG_MIPS_R7,
+	KVM_REG_MIPS_R8,
+	KVM_REG_MIPS_R9,
+	KVM_REG_MIPS_R10,
+	KVM_REG_MIPS_R11,
+	KVM_REG_MIPS_R12,
+	KVM_REG_MIPS_R13,
+	KVM_REG_MIPS_R14,
+	KVM_REG_MIPS_R15,
+	KVM_REG_MIPS_R16,
+	KVM_REG_MIPS_R17,
+	KVM_REG_MIPS_R18,
+	KVM_REG_MIPS_R19,
+	KVM_REG_MIPS_R20,
+	KVM_REG_MIPS_R21,
+	KVM_REG_MIPS_R22,
+	KVM_REG_MIPS_R23,
+	KVM_REG_MIPS_R24,
+	KVM_REG_MIPS_R25,
+	KVM_REG_MIPS_R26,
+	KVM_REG_MIPS_R27,
+	KVM_REG_MIPS_R28,
+	KVM_REG_MIPS_R29,
+	KVM_REG_MIPS_R30,
+	KVM_REG_MIPS_R31,
+
+	KVM_REG_MIPS_HI,
+	KVM_REG_MIPS_LO,
+	KVM_REG_MIPS_PC,
+
+	KVM_REG_MIPS_CP0_INDEX,
+	KVM_REG_MIPS_CP0_CONTEXT,
+	KVM_REG_MIPS_CP0_PAGEMASK,
+	KVM_REG_MIPS_CP0_WIRED,
+	KVM_REG_MIPS_CP0_BADVADDR,
+	KVM_REG_MIPS_CP0_ENTRYHI,
+	KVM_REG_MIPS_CP0_STATUS,
+	KVM_REG_MIPS_CP0_CAUSE,
+	/* EPC set via kvm_regs, et al. */
+	KVM_REG_MIPS_CP0_CONFIG,
+	KVM_REG_MIPS_CP0_CONFIG1,
+	KVM_REG_MIPS_CP0_CONFIG2,
+	KVM_REG_MIPS_CP0_CONFIG3,
+	KVM_REG_MIPS_CP0_CONFIG7,
+	KVM_REG_MIPS_CP0_ERROREPC
+};
+
+static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+			    const struct kvm_one_reg *reg)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	s64 v;
+
+	switch (reg->id) {
+	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
+		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
+		break;
+	case KVM_REG_MIPS_HI:
+		v = (long)vcpu->arch.hi;
+		break;
+	case KVM_REG_MIPS_LO:
+		v = (long)vcpu->arch.lo;
+		break;
+	case KVM_REG_MIPS_PC:
+		v = (long)vcpu->arch.pc;
+		break;
+
+	case KVM_REG_MIPS_CP0_INDEX:
+		v = (long)kvm_read_c0_guest_index(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONTEXT:
+		v = (long)kvm_read_c0_guest_context(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_PAGEMASK:
+		v = (long)kvm_read_c0_guest_pagemask(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_WIRED:
+		v = (long)kvm_read_c0_guest_wired(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_BADVADDR:
+		v = (long)kvm_read_c0_guest_badvaddr(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_ENTRYHI:
+		v = (long)kvm_read_c0_guest_entryhi(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_STATUS:
+		v = (long)kvm_read_c0_guest_status(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CAUSE:
+		v = (long)kvm_read_c0_guest_cause(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_ERROREPC:
+		v = (long)kvm_read_c0_guest_errorepc(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG:
+		v = (long)kvm_read_c0_guest_config(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG1:
+		v = (long)kvm_read_c0_guest_config1(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG2:
+		v = (long)kvm_read_c0_guest_config2(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG3:
+		v = (long)kvm_read_c0_guest_config3(cop0);
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG7:
+		v = (long)kvm_read_c0_guest_config7(cop0);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+		return put_user(v, uaddr64);
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+		u32 v32 = (u32)v;
+		return put_user(v32, uaddr32);
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+			    const struct kvm_one_reg *reg)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	u64 v;
+
+	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+		if (get_user(v, uaddr64) != 0)
+			return -EFAULT;
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+		s32 v32;
+
+		if (get_user(v32, uaddr32) != 0)
+			return -EFAULT;
+		v = (s64)v32;
+	} else {
+		return -EINVAL;
+	}
+
+	switch (reg->id) {
+	case KVM_REG_MIPS_R0:
+		/* Silently ignore requests to set $0 */
+		break;
+	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
+		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
+		break;
+	case KVM_REG_MIPS_HI:
+		vcpu->arch.hi = v;
+		break;
+	case KVM_REG_MIPS_LO:
+		vcpu->arch.lo = v;
+		break;
+	case KVM_REG_MIPS_PC:
+		vcpu->arch.pc = v;
+		break;
+
+	case KVM_REG_MIPS_CP0_INDEX:
+		kvm_write_c0_guest_index(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_CONTEXT:
+		kvm_write_c0_guest_context(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_PAGEMASK:
+		kvm_write_c0_guest_pagemask(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_WIRED:
+		kvm_write_c0_guest_wired(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_BADVADDR:
+		kvm_write_c0_guest_badvaddr(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_ENTRYHI:
+		kvm_write_c0_guest_entryhi(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_STATUS:
+		kvm_write_c0_guest_status(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_CAUSE:
+		kvm_write_c0_guest_cause(cop0, v);
+		break;
+	case KVM_REG_MIPS_CP0_ERROREPC:
+		kvm_write_c0_guest_errorepc(cop0, v);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
 }
 
 long
@@ -491,9 +725,38 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	struct kvm_vcpu *vcpu = filp->private_data;
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
 	void __user *argp = (void __user *)arg;
 	long r;
 	long r;
-	int intr;
 
 
 	switch (ioctl) {
+	case KVM_SET_ONE_REG:
+	case KVM_GET_ONE_REG: {
+		struct kvm_one_reg reg;
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			return -EFAULT;
+		if (ioctl == KVM_SET_ONE_REG)
+			return kvm_mips_set_reg(vcpu, &reg);
+		else
+			return kvm_mips_get_reg(vcpu, &reg);
+	}
+	case KVM_GET_REG_LIST: {
+		struct kvm_reg_list __user *user_list = argp;
+		u64 __user *reg_dest;
+		struct kvm_reg_list reg_list;
+		unsigned n;
+
+		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+			return -EFAULT;
+		n = reg_list.n;
+		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+			return -EFAULT;
+		if (n < reg_list.n)
+			return -E2BIG;
+		reg_dest = user_list->reg;
+		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
+				 sizeof(kvm_mips_get_one_regs)))
+			return -EFAULT;
+		return 0;
+	}
 	case KVM_NMI:
 		/* Treat the NMI as a CPU reset */
 		r = kvm_mips_reset_vcpu(vcpu);
@@ -505,8 +768,6 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 			if (copy_from_user(&irq, argp, sizeof(irq)))
 			if (copy_from_user(&irq, argp, sizeof(irq)))
 				goto out;
 				goto out;
 
 
-			intr = (int)irq.irq;
-
 			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
 				  irq.irq);
 
 
@@ -514,7 +775,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 			break;
 		}
 	default:
-		r = -EINVAL;
+		r = -ENOIOCTLCMD;
 	}
 
 out:
@@ -565,7 +826,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 
 
 	switch (ioctl) {
 	default:
-		r = -EINVAL;
+		r = -ENOIOCTLCMD;
 	}
 
 	return r;
@@ -593,13 +854,13 @@ void kvm_arch_exit(void)
 int
 kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-	return -ENOTSUPP;
+	return -ENOIOCTLCMD;
 }
 
 int
 kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-	return -ENOTSUPP;
+	return -ENOIOCTLCMD;
 }
 
 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -609,12 +870,12 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	return -ENOTSUPP;
+	return -ENOIOCTLCMD;
 }
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	return -ENOTSUPP;
+	return -ENOIOCTLCMD;
 }
 
 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
@@ -627,6 +888,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 	int r;
 
 	switch (ext) {
+	case KVM_CAP_ONE_REG:
+		r = 1;
+		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
@@ -635,7 +899,6 @@ int kvm_dev_ioctl_check_extension(long ext)
 		break;
 	}
 	return r;
-
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -677,28 +940,28 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
 
-	for (i = 0; i < 32; i++)
-		vcpu->arch.gprs[i] = regs->gprs[i];
-
+	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+		vcpu->arch.gprs[i] = regs->gpr[i];
+	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
 	vcpu->arch.hi = regs->hi;
 	vcpu->arch.hi = regs->hi;
 	vcpu->arch.lo = regs->lo;
 	vcpu->arch.lo = regs->lo;
 	vcpu->arch.pc = regs->pc;
 	vcpu->arch.pc = regs->pc;
 
 
-	return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
 
-	for (i = 0; i < 32; i++)
-		regs->gprs[i] = vcpu->arch.gprs[i];
+	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+		regs->gpr[i] = vcpu->arch.gprs[i];
 
 
 	regs->hi = vcpu->arch.hi;
 	regs->hi = vcpu->arch.hi;
 	regs->lo = vcpu->arch.lo;
 	regs->lo = vcpu->arch.lo;
 	regs->pc = vcpu->arch.pc;
 	regs->pc = vcpu->arch.pc;
 
 
-	return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+	return 0;
 }
 
 void kvm_mips_comparecount_func(unsigned long data)

+ 0 - 50
arch/mips/kvm/kvm_trap_emul.c

@@ -345,54 +345,6 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static int
-kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-	kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
-	kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
-	kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
-	kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
-	kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
-
-	kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
-	kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
-	kvm_write_c0_guest_pagemask(cop0,
-				    regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
-	kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
-	kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
-
-	return 0;
-}
-
-static int
-kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-	regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
-	regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
-	regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
-	regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
-	regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
-
-	regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
-	regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
-	regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
-	    kvm_read_c0_guest_pagemask(cop0);
-	regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
-	regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
-
-	regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
-	regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
-	regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
-	regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
-	regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
-
-	return 0;
-}
-
 static int kvm_trap_emul_vm_init(struct kvm *kvm)
 {
 	return 0;
@@ -471,8 +423,6 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
 	.irq_deliver = kvm_mips_irq_deliver_cb,
 	.irq_clear = kvm_mips_irq_clear_cb,
-	.vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
-	.vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
 };
 
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)

+ 0 - 4
arch/mips/mm/tlbex.c

@@ -301,10 +301,6 @@ static u32 tlb_handler[128] __cpuinitdata;
 static struct uasm_label labels[128] __cpuinitdata;
 static struct uasm_reloc relocs[128] __cpuinitdata;
 
-#ifdef CONFIG_64BIT
-static int check_for_high_segbits __cpuinitdata;
-#endif
-
 static int check_for_high_segbits __cpuinitdata;
 
 static unsigned int kscratch_used_mask __cpuinitdata;

+ 1 - 1
arch/mips/ralink/of.c

@@ -88,7 +88,7 @@ void __init plat_mem_setup(void)
 	__dt_setup_arch(&__dtb_start);
 
 	if (soc_info.mem_size)
-		add_memory_region(soc_info.mem_base, soc_info.mem_size,
+		add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
 				  BOOT_MEM_RAM);
 	else
 		detect_memory_region(soc_info.mem_base,

+ 1 - 1
arch/parisc/Makefile

@@ -66,7 +66,7 @@ KBUILD_CFLAGS_KERNEL += -mlong-calls
 endif
 
 # select which processor to optimise for
-cflags-$(CONFIG_PA7100)		+= -march=1.1 -mschedule=7100
+cflags-$(CONFIG_PA7000)		+= -march=1.1 -mschedule=7100
 cflags-$(CONFIG_PA7200)		+= -march=1.1 -mschedule=7200
 cflags-$(CONFIG_PA7100LC)	+= -march=1.1 -mschedule=7100LC
 cflags-$(CONFIG_PA7300LC)	+= -march=1.1 -mschedule=7300

+ 1 - 4
arch/parisc/include/asm/mmzone.h

@@ -39,17 +39,14 @@ extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
 static inline int pfn_to_nid(unsigned long pfn)
 {
 	unsigned int i;
-	unsigned char r;
 
 
 	if (unlikely(pfn_is_io(pfn)))
 		return 0;
 
 	i = pfn >> PFNNID_SHIFT;
 	BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
-	r = pfnnid_map[i];
-	BUG_ON(r == 0xff);
 
 
-	return (int)r;
+	return (int)pfnnid_map[i];
 }
 
 static inline int pfn_valid(int pfn)

+ 1 - 1
arch/parisc/kernel/drivers.c

@@ -394,7 +394,7 @@ EXPORT_SYMBOL(print_pci_hwpath);
 static void setup_bus_id(struct parisc_device *padev)
 {
 	struct hardware_path path;
-	char name[20];
+	char name[28];
 	char *output = name;
 	int i;
 
 

+ 2 - 1
arch/parisc/kernel/setup.c

@@ -69,7 +69,8 @@ void __init setup_cmdline(char **cmdline_p)
 		/* called from hpux boot loader */
 		boot_command_line[0] = '\0';
 	} else {
-		strcpy(boot_command_line, (char *)__va(boot_args[1]));
+		strlcpy(boot_command_line, (char *)__va(boot_args[1]),
+			COMMAND_LINE_SIZE);
 
 
 #ifdef CONFIG_BLK_DEV_INITRD
 		if (boot_args[2] != 0) /* did palo pass us a ramdisk? */

+ 10 - 7
arch/powerpc/include/asm/cputable.h

@@ -176,6 +176,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_CFAR			LONG_ASM_CONST(0x0100000000000000)
 #define	CPU_FTR_HAS_PPR			LONG_ASM_CONST(0x0200000000000000)
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 
 
 #ifndef __ASSEMBLY__
 
@@ -394,19 +395,20 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
 	    CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
-	    CPU_FTR_HVMODE)
+	    CPU_FTR_HVMODE | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+	    CPU_FTR_DABRX)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -415,7 +417,7 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_DSCR | CPU_FTR_SAO  | CPU_FTR_ASYM_SMT | \
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
-	    CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR)
+	    CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -430,14 +432,15 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_PAUSE_ZERO  | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
-	    CPU_FTR_UNALIGNED_LD_STD)
+	    CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
 #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_PURR | CPU_FTR_REAL_LE)
+	    CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
-		     CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
+		     CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
+		     CPU_FTR_ICSWX | CPU_FTR_DABRX )
 
 
 #ifdef __powerpc64__
 #ifdef CONFIG_PPC_BOOK3E

+ 1 - 1
arch/powerpc/include/asm/exception-64s.h

@@ -513,7 +513,7 @@ label##_common:							\
  */
 #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)		  \
 	EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
-			 FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
+			 FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
 
 
 /*
  * When the idle code in power4_idle puts the CPU into NAP mode,

+ 1 - 0
arch/powerpc/include/asm/hvcall.h

@@ -264,6 +264,7 @@
 #define H_GET_MPP		0x2D4
 #define H_HOME_NODE_ASSOCIATIVITY 0x2EC
 #define H_BEST_ENERGY		0x2F4
+#define H_XIRR_X		0x2FC
 #define H_RANDOM		0x300
 #define H_COP			0x304
 #define H_GET_MPP_X		0x314

+ 10 - 6
arch/powerpc/include/asm/kvm_asm.h

@@ -54,8 +54,16 @@
 #define BOOKE_INTERRUPT_DEBUG 15
 
 /* E500 */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
+/*
+ * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
+ */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
+				BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 #define BOOKE_INTERRUPT_DOORBELL 36
@@ -67,10 +75,6 @@
 #define BOOKE_INTERRUPT_HV_SYSCALL 40
 #define BOOKE_INTERRUPT_HV_PRIV 41
 
-/* altivec */
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43
-
 /* book3s */
 
 #define BOOK3S_INTERRUPT_SYSTEM_RESET	0x100

+ 11 - 0
arch/powerpc/include/asm/ppc_asm.h

@@ -523,6 +523,17 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
 #define PPC440EP_ERR42
 #endif
 
+/* The following stops all load and store data streams associated with stream
+ * ID (ie. streams created explicitly).  The embedded and server mnemonics for
+ * dcbt are different so we use machine "power4" here explicitly.
+ */
+#define DCBT_STOP_ALL_STREAM_IDS(scratch)	\
+.machine push ;					\
+.machine "power4" ;				\
+       lis     scratch,0x60000000@h;		\
+       dcbt    r0,scratch,0b01010;		\
+.machine pop
+
 /*
  * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
  * keep the address intact to be compatible with code shared with

+ 4 - 9
arch/powerpc/include/asm/processor.h

@@ -409,21 +409,16 @@ static inline void prefetchw(const void *x)
 #endif
 
 #ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
 {
-	unsigned long sp;
-
 	if (is_32)
 	if (is_32)
-		sp = regs->gpr[1] & 0x0ffffffffUL;
-	else
-		sp = regs->gpr[1];
-
+		return sp & 0x0ffffffffUL;
 	return sp;
 	return sp;
 }
 }
 #else
 #else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
 {
-	return regs->gpr[1];
+	return sp;
 }
 }
 #endif
 #endif
 
 

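The reworked get_clean_sp() above now takes the stack pointer value itself rather than a pt_regs pointer, and for 32-bit (compat) tasks simply masks it down to 32 bits. A minimal stand-alone sketch of the same behaviour in plain C (not kernel code; the sample value is made up):

#include <stdio.h>

static unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	if (is_32)
		return sp & 0x0ffffffffUL;	/* compat task: keep the low 32 bits */
	return sp;				/* 64-bit task: use sp as-is */
}

int main(void)
{
	unsigned long sp = 0x00003fffffffe0f0UL;	/* arbitrary example value */

	printf("%lx\n", get_clean_sp(sp, 1));		/* prints ffffe0f0 */
	printf("%lx\n", get_clean_sp(sp, 0));		/* prints 3fffffffe0f0 */
	return 0;
}
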
+ 0 - 11
arch/powerpc/include/asm/reg.h

@@ -111,17 +111,6 @@
 #define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)	(((x) & MSR_TS_MASK) == MSR_TS_S)

-/* Reason codes describing kernel causes for transaction aborts.  By
-   convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
-   the failure is persistent.
-*/
-#define TM_CAUSE_RESCHED	0xfe
-#define TM_CAUSE_TLBI		0xfc
-#define TM_CAUSE_FAC_UNAV	0xfa
-#define TM_CAUSE_SYSCALL	0xf9 /* Persistent */
-#define TM_CAUSE_MISC		0xf6
-#define TM_CAUSE_SIGNAL		0xf4
-
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define MSR_64BIT	MSR_SF


+ 3 - 0
arch/powerpc/include/asm/signal.h

@@ -3,5 +3,8 @@

 #define __ARCH_HAS_SA_RESTORER
 #include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);

 #endif /* _ASM_POWERPC_SIGNAL_H */

+ 2 - 0
arch/powerpc/include/asm/tm.h

@@ -5,6 +5,8 @@
  * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
  */

+#include <uapi/asm/tm.h>
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 extern void do_load_up_transact_fpu(struct thread_struct *thread);
 extern void do_load_up_transact_altivec(struct thread_struct *thread);

+ 1 - 0
arch/powerpc/include/uapi/asm/Kbuild

@@ -40,6 +40,7 @@ header-y += statfs.h
 header-y += swab.h
 header-y += termbits.h
 header-y += termios.h
+header-y += tm.h
 header-y += types.h
 header-y += ucontext.h
 header-y += unistd.h

+ 18 - 0
arch/powerpc/include/uapi/asm/tm.h

@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts.  By
+ * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
+ * the failure is persistent.  PAPR saves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT	0x01
+#define TM_CAUSE_RESCHED	0xde
+#define TM_CAUSE_TLBI		0xdc
+#define TM_CAUSE_FAC_UNAV	0xda
+#define TM_CAUSE_SYSCALL	0xd8  /* future use */
+#define TM_CAUSE_MISC		0xd6  /* future use */
+#define TM_CAUSE_SIGNAL		0xd4
+#define TM_CAUSE_ALIGNMENT	0xd2
+#define TM_CAUSE_EMULATE	0xd0
+
+#endif
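
The header above fixes the abort reason codes in uapi and reserves bit 0 (TM_CAUSE_PERSISTENT) as the persistence flag mirrored into TEXASR[56]; the kernel ORs it into a cause where appropriate, e.g. TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT in the traps.c hunk further down. A stand-alone C sketch of how such a code splits into cause and persistence (the helper is hypothetical; only the constants come from the header):

#include <stdint.h>
#include <stdio.h>

#define TM_CAUSE_PERSISTENT	0x01
#define TM_CAUSE_RESCHED	0xde
#define TM_CAUSE_ALIGNMENT	0xd2

static void describe_tm_failure(uint8_t code)
{
	int persistent = code & TM_CAUSE_PERSISTENT;		/* bit 0, copied to TEXASR[56] */
	unsigned int cause = code & ~TM_CAUSE_PERSISTENT;	/* remaining bits name the cause */

	printf("cause 0x%02x (%s)\n", cause,
	       persistent ? "persistent" : "transient");
}

int main(void)
{
	describe_tm_failure(TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT);	/* cause 0xd2 (persistent) */
	describe_tm_failure(TM_CAUSE_RESCHED);				/* cause 0xde (transient) */
	return 0;
}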

+ 3 - 3
arch/powerpc/kernel/cputable.c

@@ -452,7 +452,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.mmu_features		= MMU_FTRS_POWER8,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.oprofile_type		= PPC_OPROFILE_INVALID,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.cpu_setup		= __setup_cpu_power8,
 		.cpu_restore		= __restore_cpu_power8,
@@ -482,7 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_name		= "POWER7+ (raw)",
 		.cpu_features		= CPU_FTRS_POWER7,
 		.cpu_user_features	= COMMON_USER_POWER7,
-		.cpu_user_features	= COMMON_USER2_POWER7,
+		.cpu_user_features2	= COMMON_USER2_POWER7,
 		.mmu_features		= MMU_FTRS_POWER7,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
@@ -507,7 +507,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power8",
-		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.oprofile_type		= PPC_OPROFILE_INVALID,
 		.cpu_setup		= __setup_cpu_power8,
 		.cpu_restore		= __restore_cpu_power8,
 		.platform		= "power8",

+ 1 - 1
arch/powerpc/kernel/entry_32.S

@@ -849,7 +849,7 @@ resume_kernel:
 	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
 	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r8,TI_FLAGS(r9)
-	andis.	r8,r8,_TIF_EMULATE_STACK_STORE@h
+	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
 	beq+	1f

 	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

+ 7 - 28
arch/powerpc/kernel/entry_64.S

@@ -465,20 +465,6 @@ BEGIN_FTR_SECTION
 	std	r0, THREAD_EBBHR(r3)
 	mfspr	r0, SPRN_EBBRR
 	std	r0, THREAD_EBBRR(r3)
-
-	/* PMU registers made user read/(write) by EBB */
-	mfspr	r0, SPRN_SIAR
-	std	r0, THREAD_SIAR(r3)
-	mfspr	r0, SPRN_SDAR
-	std	r0, THREAD_SDAR(r3)
-	mfspr	r0, SPRN_SIER
-	std	r0, THREAD_SIER(r3)
-	mfspr	r0, SPRN_MMCR0
-	std	r0, THREAD_MMCR0(r3)
-	mfspr	r0, SPRN_MMCR2
-	std	r0, THREAD_MMCR2(r3)
-	mfspr	r0, SPRN_MMCRA
-	std	r0, THREAD_MMCRA(r3)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 #endif

@@ -501,6 +487,13 @@ BEGIN_FTR_SECTION
 	ldarx	r6,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

+#ifdef CONFIG_PPC_BOOK3S
+/* Cancel all explicit user streams as they will have no use after context
+ * switch and will stop the HW from creating streams itself
+ */
+	DCBT_STOP_ALL_STREAM_IDS(r6)
+#endif
+
 	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
 	std	r6,PACACURRENT(r13)	/* Set new 'current' */

@@ -574,20 +567,6 @@ BEGIN_FTR_SECTION
 	ld	r0, THREAD_EBBRR(r4)
 	mtspr	SPRN_EBBRR, r0

-	/* PMU registers made user read/(write) by EBB */
-	ld	r0, THREAD_SIAR(r4)
-	mtspr	SPRN_SIAR, r0
-	ld	r0, THREAD_SDAR(r4)
-	mtspr	SPRN_SDAR, r0
-	ld	r0, THREAD_SIER(r4)
-	mtspr	SPRN_SIER, r0
-	ld	r0, THREAD_MMCR0(r4)
-	mtspr	SPRN_MMCR0, r0
-	ld	r0, THREAD_MMCR2(r4)
-	mtspr	SPRN_MMCR2, r0
-	ld	r0, THREAD_MMCRA(r4)
-	mtspr	SPRN_MMCRA, r0
-
 	ld	r0,THREAD_TAR(r4)
 	mtspr	SPRN_TAR,r0
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

+ 27 - 65
arch/powerpc/kernel/exceptions-64s.S

@@ -454,38 +454,14 @@ BEGIN_FTR_SECTION
 	xori	r10,r10,(MSR_FE0|MSR_FE1)
 	mtmsrd	r10
 	sync
-	fmr	0,0
-	fmr	1,1
-	fmr	2,2
-	fmr	3,3
-	fmr	4,4
-	fmr	5,5
-	fmr	6,6
-	fmr	7,7
-	fmr	8,8
-	fmr	9,9
-	fmr	10,10
-	fmr	11,11
-	fmr	12,12
-	fmr	13,13
-	fmr	14,14
-	fmr	15,15
-	fmr	16,16
-	fmr	17,17
-	fmr	18,18
-	fmr	19,19
-	fmr	20,20
-	fmr	21,21
-	fmr	22,22
-	fmr	23,23
-	fmr	24,24
-	fmr	25,25
-	fmr	26,26
-	fmr	27,27
-	fmr	28,28
-	fmr	29,29
-	fmr	30,30
-	fmr	31,31
+
+#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
+#define FMR4(n)  FMR2(n) ; FMR2(n+2)
+#define FMR8(n)  FMR4(n) ; FMR4(n+4)
+#define FMR16(n) FMR8(n) ; FMR8(n+8)
+#define FMR32(n) FMR16(n) ; FMR16(n+16)
+	FMR32(0)
+
 FTR_SECTION_ELSE
 /*
  * To denormalise we need to move a copy of the register to itself.
@@ -495,39 +471,25 @@ FTR_SECTION_ELSE
 	oris	r10,r10,MSR_VSX@h
 	mtmsrd	r10
 	sync
-	XVCPSGNDP(0,0,0)
-	XVCPSGNDP(1,1,1)
-	XVCPSGNDP(2,2,2)
-	XVCPSGNDP(3,3,3)
-	XVCPSGNDP(4,4,4)
-	XVCPSGNDP(5,5,5)
-	XVCPSGNDP(6,6,6)
-	XVCPSGNDP(7,7,7)
-	XVCPSGNDP(8,8,8)
-	XVCPSGNDP(9,9,9)
-	XVCPSGNDP(10,10,10)
-	XVCPSGNDP(11,11,11)
-	XVCPSGNDP(12,12,12)
-	XVCPSGNDP(13,13,13)
-	XVCPSGNDP(14,14,14)
-	XVCPSGNDP(15,15,15)
-	XVCPSGNDP(16,16,16)
-	XVCPSGNDP(17,17,17)
-	XVCPSGNDP(18,18,18)
-	XVCPSGNDP(19,19,19)
-	XVCPSGNDP(20,20,20)
-	XVCPSGNDP(21,21,21)
-	XVCPSGNDP(22,22,22)
-	XVCPSGNDP(23,23,23)
-	XVCPSGNDP(24,24,24)
-	XVCPSGNDP(25,25,25)
-	XVCPSGNDP(26,26,26)
-	XVCPSGNDP(27,27,27)
-	XVCPSGNDP(28,28,28)
-	XVCPSGNDP(29,29,29)
-	XVCPSGNDP(30,30,30)
-	XVCPSGNDP(31,31,31)
+
+#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
+#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
+#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
+#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
+#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
+	XVCPSGNDP32(0)
+
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+	b	denorm_done
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER8 we need to do that for all 64 VSX registers
+ */
+	XVCPSGNDP32(32)
+denorm_done:
 	mtspr	SPRN_HSRR0,r11
 	mtcrf	0x80,r9
 	ld	r9,PACA_EXGEN+EX_R9(r13)
@@ -721,7 +683,7 @@ machine_check_common:
 	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
 	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
 	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
 #ifdef CONFIG_PPC_DOORBELL
 	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)

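The FMR*/XVCPSGNDP* helpers introduced above replace long runs of hand-written instructions with a doubling macro: each level emits two copies of the level below, so FMR32(0)/XVCPSGNDP32(0) cover registers 0-31 (and XVCPSGNDP32(32) covers 32-63 on POWER8). A stand-alone C sketch of the same expansion pattern, with a dummy op() standing in for the assembler instruction (illustrative only, not kernel code):

#include <stdio.h>

static void op(int n) { printf("op %d\n", n); }	/* stand-in for fmr/XVCPSGNDP */

#define OP2(n)	op(n); op(n+1)
#define OP4(n)	OP2(n); OP2(n+2)
#define OP8(n)	OP4(n); OP4(n+4)
#define OP16(n)	OP8(n); OP8(n+8)
#define OP32(n)	OP16(n); OP16(n+16)

int main(void)
{
	OP32(0);	/* expands to op(0) ... op(31), one call per register */
	return 0;
}
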
+ 1 - 1
arch/powerpc/kernel/irq.c

@@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
 	 * in case we also had a rollover while hard disabled
 	 */
 	local_paca->irq_happened &= ~PACA_IRQ_DEC;
-	if (decrementer_check_overflow())
+	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
 		return 0x900;

 	/* Finally check if an external interrupt happened */

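The one-line change above makes __check_irq_replay() replay the decrementer interrupt (vector 0x900) whenever one was latched in irq_happened while interrupts were soft-disabled, instead of relying solely on the decrementer having wrapped by the time of the check. A plain-C sketch of that decision; the bit value and the stub are illustrative, not the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>

#define PACA_IRQ_DEC 0x08	/* illustrative bit value only */

static bool decrementer_check_overflow_stub(void) { return false; }

static unsigned int check_irq_replay(unsigned int happened)
{
	/* Replay if a DEC interrupt was latched, or if the DEC has wrapped. */
	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow_stub())
		return 0x900;
	return 0;
}

int main(void)
{
	/* Before the fix, a latched DEC with no pending overflow was lost. */
	printf("vector 0x%x\n", check_irq_replay(PACA_IRQ_DEC));
	return 0;
}
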
+ 4 - 14
arch/powerpc/kernel/pci-common.c

@@ -657,15 +657,6 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
  *     ranges. However, some machines (thanks Apple !) tend to split their
  *     space into lots of small contiguous ranges. So we have to coalesce.
  *
- *   - We can only cope with all memory ranges having the same offset
- *     between CPU addresses and PCI addresses. Unfortunately, some bridges
- *     are setup for a large 1:1 mapping along with a small "window" which
- *     maps PCI address 0 to some arbitrary high address of the CPU space in
- *     order to give access to the ISA memory hole.
- *     The way out of here that I've chosen for now is to always set the
- *     offset based on the first resource found, then override it if we
- *     have a different offset and the previous was set by an ISA hole.
- *
  *   - Some busses have IO space not starting at 0, which causes trouble with
  *     the way we do our IO resource renumbering. The code somewhat deals with
  *     it for 64 bits but I would expect problems on 32 bits.
@@ -680,10 +671,9 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 	int rlen;
 	int pna = of_n_addr_cells(dev);
 	int np = pna + 5;
-	int memno = 0, isa_hole = -1;
+	int memno = 0;
 	u32 pci_space;
 	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
-	unsigned long long isa_mb = 0;
 	struct resource *res;

 	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
@@ -777,8 +767,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 			}
 			/* Handles ISA memory hole space here */
 			if (pci_addr == 0) {
-				isa_mb = cpu_addr;
-				isa_hole = memno;
 				if (primary || isa_mem_base == 0)
 					isa_mem_base = cpu_addr;
 				hose->isa_mem_phys = cpu_addr;
@@ -839,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
 	}
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 		struct resource *res = dev->resource + i;
+		struct pci_bus_region reg;
 		if (!res->flags)
 			continue;

@@ -847,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
 		 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
 		 * since in that case, we don't want to re-assign anything
 		 */
+		pcibios_resource_to_bus(dev, &reg, res);
 		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
-		    (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
+		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
 			/* Only print message if not re-assigning */
 			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
 				pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "

+ 4 - 3
arch/powerpc/kernel/process.c

@@ -399,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 {
 	mtspr(SPRN_DABR, dabr);
-	mtspr(SPRN_DABRX, dabrx);
+	if (cpu_has_feature(CPU_FTR_DABRX))
+		mtspr(SPRN_DABRX, dabrx);
 	return 0;
 }
 #else
@@ -1368,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)

 #ifdef CONFIG_PPC64
 /* Called with hard IRQs off */
-void __ppc64_runlatch_on(void)
+void notrace __ppc64_runlatch_on(void)
 {
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
@@ -1381,7 +1382,7 @@ void __ppc64_runlatch_on(void)
 }

 /* Called with hard IRQs off */
-void __ppc64_runlatch_off(void)
+void notrace __ppc64_runlatch_off(void)
 {
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;

+ 38 - 2
arch/powerpc/kernel/signal.c

@@ -18,6 +18,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/debug.h>
+#include <asm/tm.h>

 #include "signal.h"

@@ -30,13 +31,13 @@ int show_unhandled_signals = 1;
 /*
  * Allocate space for the signal frame
  */
-void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
 			   size_t frame_size, int is_32)
 {
         unsigned long oldsp, newsp;

         /* Default to using normal stack */
-        oldsp = get_clean_sp(regs, is_32);
+        oldsp = get_clean_sp(sp, is_32);

 	/* Check for alt stack */
 	if ((ka->sa.sa_flags & SA_ONSTACK) &&
@@ -175,3 +176,38 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)

 	user_enter();
 }
+
+unsigned long get_tm_stackpointer(struct pt_regs *regs)
+{
+	/* When in an active transaction that takes a signal, we need to be
+	 * careful with the stack.  It's possible that the stack has moved back
+	 * up after the tbegin.  The obvious case here is when the tbegin is
+	 * called inside a function that returns before a tend.  In this case,
+	 * the stack is part of the checkpointed transactional memory state.
+	 * If we write over this non transactionally or in suspend, we are in
+	 * trouble because if we get a tm abort, the program counter and stack
+	 * pointer will be back at the tbegin but our in memory stack won't be
+	 * valid anymore.
+	 *
+	 * To avoid this, when taking a signal in an active transaction, we
+	 * need to use the stack pointer from the checkpointed state, rather
+	 * than the speculated state.  This ensures that the signal context
+	 * (written tm suspended) will be written below the stack required for
+	 * the rollback.  The transaction is aborted because of the treclaim,
+	 * so any memory written between the tbegin and the signal will be
+	 * rolled back anyway.
+	 *
+	 * For signals taken in non-TM or suspended mode, we use the
+	 * normal/non-checkpointed stack pointer.
+	 */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (MSR_TM_ACTIVE(regs->msr)) {
+		tm_enable();
+		tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+		if (MSR_TM_TRANSACTIONAL(regs->msr))
+			return current->thread.ckpt_regs.gpr[1];
+	}
+#endif
+	return regs->gpr[1];
+}

+ 1 - 1
arch/powerpc/kernel/signal.h

@@ -12,7 +12,7 @@

 extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);

-extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
 				  size_t frame_size, int is_32);

 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,

+ 2 - 8
arch/powerpc/kernel/signal_32.c

@@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs,
 {
 	unsigned long msr = regs->msr;

-	/* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
-	 * thread.transact_fpr[], thread.transact_vr[], etc.
-	 */
-	tm_enable();
-	tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
 	/* Make sure floating point registers are stored in regs */
 	flush_fp_to_thread(current);

@@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,

 	/* Set up Signal Frame */
 	/* Put a Real Time Context onto stack */
-	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+	rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
 	addr = rt_sf;
 	if (unlikely(rt_sf == NULL))
 		goto badframe;
@@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 	unsigned long tramp;

 	/* Set up Signal Frame */
-	frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+	frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
 	if (unlikely(frame == NULL))
 		goto badframe;
 	sc = (struct sigcontext __user *) &frame->sctx;

+ 7 - 16
arch/powerpc/kernel/signal_64.c

@@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
  * As above, but Transactional Memory is in use, so deliver sigcontexts
  * containing checkpointed and transactional register states.
  *
- * To do this, we treclaim to gather both sets of registers and set up the
- * 'normal' sigcontext registers with rolled-back register values such that a
- * simple signal handler sees a correct checkpointed register state.
- * If interested, a TM-aware sighandler can examine the transactional registers
- * in the 2nd sigcontext to determine the real origin of the signal.
+ * To do this, we treclaim (done before entering here) to gather both sets of
+ * registers and set up the 'normal' sigcontext registers with rolled-back
+ * register values such that a simple signal handler sees a correct
+ * checkpointed register state.  If interested, a TM-aware sighandler can
+ * examine the transactional registers in the 2nd sigcontext to determine the
+ * real origin of the signal.
  */
 static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 				 struct sigcontext __user *tm_sc,
@@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,

 	BUG_ON(!MSR_TM_ACTIVE(regs->msr));

-	/* tm_reclaim rolls back all reg states, saving checkpointed (older)
-	 * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
-	 * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
-	 * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
-	 * thread.fr[]/vr[]s.  The transactional (newer) GPRs are on the
-	 * stack, in *regs.
-	 */
-	tm_enable();
-	tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
 	flush_fp_to_thread(current);

 #ifdef CONFIG_ALTIVEC
@@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 	unsigned long newsp = 0;
 	long err = 0;

-	frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+	frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
 	if (unlikely(frame == NULL))
 		goto badframe;


+ 39 - 0
arch/powerpc/kernel/traps.c

@@ -53,6 +53,7 @@
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
 #include <asm/processor.h>
+#include <asm/tm.h>
 #endif
 #include <asm/kexec.h>
 #include <asm/ppc-opcode.h>
@@ -932,6 +933,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword)
 	return 0;
 }

+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+        /* If we're emulating a load/store in an active transaction, we cannot
+         * emulate it as the kernel operates in transaction suspended context.
+         * We need to abort the transaction.  This creates a persistent TM
+         * abort so tell the user what caused it with a new code.
+	 */
+	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
+		tm_enable();
+		tm_abort(cause);
+		return true;
+	}
+	return false;
+}
+#else
+static inline bool tm_abort_check(struct pt_regs *regs, int reason)
+{
+	return false;
+}
+#endif
+
 static int emulate_instruction(struct pt_regs *regs)
 {
 	u32 instword;
@@ -971,6 +994,9 @@ static int emulate_instruction(struct pt_regs *regs)

 	/* Emulate load/store string insn. */
 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+		if (tm_abort_check(regs,
+				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+			return -EINVAL;
 		PPC_WARN_EMULATED(string, regs);
 		return emulate_string_inst(regs, instword);
 	}
@@ -1139,6 +1165,16 @@ bail:
 	exception_exit(prev_state);
 }

+/*
+ * This occurs when running in hypervisor mode on POWER6 or later
+ * and an illegal instruction is encountered.
+ */
+void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
+{
+	regs->msr |= REASON_ILLEGAL;
+	program_check_exception(regs);
+}
+
 void alignment_exception(struct pt_regs *regs)
 {
 	enum ctx_state prev_state = exception_enter();
@@ -1148,6 +1184,9 @@ void alignment_exception(struct pt_regs *regs)
 	if (!arch_irq_disabled_regs(regs))
 		local_irq_enable();

+	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
+		goto bail;
+
 	/* we don't implement logging of alignment exceptions */
 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
 		fixed = fix_alignment(regs);

+ 5 - 0
arch/powerpc/kvm/44x_tlb.c

@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	struct kvmppc_44x_tlbe *tlbe;
 	unsigned int gtlb_index;
+	int idx;

 	gtlb_index = kvmppc_get_gpr(vcpu, ra);
 	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		return EMULATE_FAIL;
 	}

+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if (tlbe_is_host_safe(vcpu, tlbe)) {
 		gva_t eaddr;
 		gpa_t gpaddr;
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
 	}

+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
 	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
 			     tlbe->word2);


+ 2 - 0
arch/powerpc/kvm/book3s_hv.c

@@ -562,6 +562,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	case H_CPPR:
 	case H_EOI:
 	case H_IPI:
+	case H_IPOLL:
+	case H_XIRR_X:
 		if (kvmppc_xics_enabled(vcpu)) {
 			ret = kvmppc_xics_hcall(vcpu, req);
 			break;

Some files were not shown because the number of changed files is too large