
Merge 4.0-rc7 into usb-next

We want the fixes in here, and to help resolve merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman, 10 years ago
commit b7a4abb674
100 changed files with 914 additions and 450 deletions
  1. Documentation/devicetree/bindings/net/dsa/dsa.txt (+3 -1)
  2. Documentation/input/alps.txt (+8 -0)
  3. Documentation/input/event-codes.txt (+6 -0)
  4. Documentation/input/multi-touch-protocol.txt (+6 -3)
  5. MAINTAINERS (+25 -19)
  6. Makefile (+1 -1)
  7. arch/arc/kernel/signal.c (+18 -6)
  8. arch/arm/Kconfig (+1 -0)
  9. arch/arm/boot/dts/dm8168-evm.dts (+19 -0)
  10. arch/arm/boot/dts/dm816x.dtsi (+14 -4)
  11. arch/arm/boot/dts/dra7.dtsi (+0 -2)
  12. arch/arm/boot/dts/omap3.dtsi (+4 -0)
  13. arch/arm/boot/dts/rk3288.dtsi (+1 -0)
  14. arch/arm/boot/dts/socfpga.dtsi (+1 -1)
  15. arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts (+16 -0)
  16. arch/arm/boot/dts/sun4i-a10.dtsi (+1 -2)
  17. arch/arm/boot/dts/sun5i-a13.dtsi (+1 -2)
  18. arch/arm/boot/dts/sun7i-a20.dtsi (+1 -2)
  19. arch/arm/mach-omap2/id.c (+2 -0)
  20. arch/arm/mach-pxa/irq.c (+48 -63)
  21. arch/arm/mach-pxa/zeus.c (+1 -1)
  22. arch/arm/mach-sunxi/Kconfig (+2 -6)
  23. arch/arm/plat-omap/dmtimer.c (+14 -1)
  24. arch/arm64/boot/dts/arm/juno-clocks.dtsi (+1 -1)
  25. arch/arm64/include/asm/cmpxchg.h (+24 -8)
  26. arch/arm64/include/asm/mmu_context.h (+9 -0)
  27. arch/arm64/include/asm/percpu.h (+33 -11)
  28. arch/metag/include/asm/io.h (+1 -0)
  29. arch/metag/include/asm/pgtable-bits.h (+104 -0)
  30. arch/metag/include/asm/pgtable.h (+1 -94)
  31. arch/parisc/include/asm/pgalloc.h (+10 -7)
  32. arch/parisc/kernel/syscall_table.S (+6 -3)
  33. arch/powerpc/include/asm/cputhreads.h (+1 -1)
  34. arch/powerpc/include/asm/ppc-opcode.h (+3 -0)
  35. arch/powerpc/include/asm/reg.h (+3 -0)
  36. arch/powerpc/kernel/cputable.c (+20 -0)
  37. arch/powerpc/kernel/dbell.c (+2 -0)
  38. arch/powerpc/kernel/exceptions-64s.S (+1 -1)
  39. arch/powerpc/kvm/book3s_hv.c (+4 -4)
  40. arch/powerpc/kvm/book3s_hv_rmhandlers.S (+1 -0)
  41. arch/powerpc/platforms/powernv/smp.c (+12 -2)
  42. arch/powerpc/platforms/pseries/mobility.c (+23 -21)
  43. arch/s390/include/asm/elf.h (+1 -1)
  44. arch/s390/kernel/ftrace.c (+45 -16)
  45. arch/s390/kernel/perf_cpum_sf.c (+5 -2)
  46. arch/s390/kernel/swsusp_asm64.S (+11 -0)
  47. arch/sparc/include/asm/hypervisor.h (+12 -0)
  48. arch/sparc/kernel/hvapi.c (+1 -0)
  49. arch/sparc/kernel/hvcalls.S (+16 -0)
  50. arch/sparc/kernel/pcr.c (+33 -0)
  51. arch/sparc/kernel/perf_event.c (+43 -12)
  52. arch/sparc/kernel/process_64.c (+4 -0)
  53. arch/sparc/lib/memmove.S (+32 -3)
  54. arch/x86/kernel/cpu/perf_event_intel.c (+5 -5)
  55. arch/x86/kernel/entry_64.S (+29 -5)
  56. arch/x86/kernel/kgdb.c (+1 -1)
  57. arch/x86/kernel/reboot.c (+10 -0)
  58. arch/x86/kvm/ioapic.c (+3 -1)
  59. arch/x86/kvm/lapic.c (+1 -2)
  60. arch/x86/kvm/vmx.c (+5 -2)
  61. arch/x86/xen/p2m.c (+9 -1)
  62. block/blk-merge.c (+1 -1)
  63. block/blk-mq-tag.c (+4 -2)
  64. block/blk-mq.c (+3 -3)
  65. block/blk-settings.c (+3 -3)
  66. drivers/ata/libata-core.c (+15 -4)
  67. drivers/base/regmap/internal.h (+8 -0)
  68. drivers/base/regmap/regcache.c (+8 -8)
  69. drivers/base/regmap/regmap.c (+14 -18)
  70. drivers/block/nbd.c (+4 -4)
  71. drivers/block/nvme-core.c (+1 -0)
  72. drivers/clocksource/Kconfig (+3 -0)
  73. drivers/clocksource/timer-sun5i.c (+0 -7)
  74. drivers/dma/bcm2835-dma.c (+1 -0)
  75. drivers/dma/dma-jz4740.c (+7 -0)
  76. drivers/dma/edma.c (+7 -0)
  77. drivers/dma/moxart-dma.c (+3 -1)
  78. drivers/dma/omap-dma.c (+1 -0)
  79. drivers/firmware/dmi_scan.c (+7 -15)
  80. drivers/gpio/gpio-mpc8xxx.c (+1 -1)
  81. drivers/gpio/gpio-syscon.c (+1 -1)
  82. drivers/gpio/gpiolib-acpi.c (+10 -0)
  83. drivers/gpu/drm/drm_crtc.c (+1 -12)
  84. drivers/gpu/drm/drm_edid_load.c (+1 -0)
  85. drivers/gpu/drm/drm_probe_helper.c (+1 -0)
  86. drivers/gpu/drm/exynos/exynos_drm_fimd.c (+5 -3)
  87. drivers/gpu/drm/exynos/exynos_mixer.c (+10 -7)
  88. drivers/gpu/drm/i915/i915_gem.c (+21 -17)
  89. drivers/gpu/drm/i915/i915_gem_execbuffer.c (+1 -1)
  90. drivers/gpu/drm/i915/intel_display.c (+13 -5)
  91. drivers/gpu/drm/i915/intel_sprite.c (+2 -2)
  92. drivers/gpu/drm/radeon/cikd.h (+1 -0)
  93. drivers/gpu/drm/radeon/radeon.h (+1 -0)
  94. drivers/gpu/drm/radeon/radeon_bios.c (+7 -3)
  95. drivers/gpu/drm/radeon/radeon_mn.c (+4 -7)
  96. drivers/gpu/drm/radeon/radeon_pm.c (+17 -5)
  97. drivers/gpu/drm/radeon/radeon_ring.c (+1 -1)
  98. drivers/gpu/drm/radeon/radeon_ttm.c (+4 -0)
  99. drivers/gpu/drm/radeon/vce_v2_0.c (+3 -0)
  100. drivers/iio/accel/bma180.c (+1 -1)

+ 3 - 1
Documentation/devicetree/bindings/net/dsa/dsa.txt

@@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
 (DSA_MAX_SWITCHES).
 Each of these switch child nodes should have the following required properties:
 
-- reg			: Describes the switch address on the MII bus
+- reg			: Contains two fields. The first one describes the
+			  address on the MII bus. The second is the switch
+			  number that must be unique in cascaded configurations
 - #address-cells	: Must be 1
 - #size-cells		: Must be 0
 

+ 8 - 0
Documentation/input/alps.txt

@@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
  byte 4:  0   y6   y5   y4   y3   y2   y1   y0
  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
 
+Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
+the DualPoint Stick.
+
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
 
@@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
  byte 7:    0   y6   y5   y4   y3   y2   y1   y0
  byte 8:    0   z6   z5   z4   z3   z2   z1   z0
 
+Devices which use the interleaving format normally send standard PS/2 mouse
+packets for the DualPoint Stick + ALPS Absolute Mode packets for the
+touchpad, switching to the interleaved packet format when both the stick and
+the touchpad are used at the same time.
+
 ALPS Absolute Mode - Protocol Version 3
 ---------------------------------------
 
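
As an illustration of the byte maps above, a hedged sketch of extracting the
7-bit y and z fields from a Version 2 packet (function and buffer names are
invented; this is not the in-tree decoder):

#include <stdint.h>

static void demo_alps_v2_decode(const uint8_t *pkt,
				unsigned int *y, unsigned int *z)
{
	*y = pkt[4] & 0x7f;	/* byte 4:  0  y6 y5 y4 y3 y2 y1 y0 */
	*z = pkt[5] & 0x7f;	/* byte 5:  0  z6 z5 z4 z3 z2 z1 z0 */
}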

+ 6 - 0
Documentation/input/event-codes.txt

@@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
 The kernel does not provide button emulation for such devices but treats
 them as any other INPUT_PROP_BUTTONPAD device.
 
+INPUT_PROP_ACCELEROMETER
+-------------------------
+Directional axes on this device (absolute and/or relative x, y, z) represent
+accelerometer data. All other axes retain their meaning. A device must not mix
+regular directional axes and accelerometer axes on the same event node.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
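
A hedged sketch of how a driver might advertise the new property (axis
ranges are placeholders, not taken from any real device):

#include <linux/input.h>

static void demo_accel_setup(struct input_dev *input)
{
	__set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
	/* x/y/z now carry accelerometer data, per the text above */
	input_set_abs_params(input, ABS_X, -512, 511, 4, 0);
	input_set_abs_params(input, ABS_Y, -512, 511, 4, 0);
	input_set_abs_params(input, ABS_Z, -512, 511, 4, 0);
}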

+ 6 - 3
Documentation/input/multi-touch-protocol.txt

@@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
 
 The type of approaching tool. A lot of kernel drivers cannot distinguish
 between different tool types, such as a finger or a pen. In such cases, the
-event should be omitted. The protocol currently supports MT_TOOL_FINGER and
-MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
-drivers should instead use input_mt_report_slot_state().
+event should be omitted. The protocol currently supports MT_TOOL_FINGER,
+MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
+by input core; drivers should instead use input_mt_report_slot_state().
+A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
+device, because the firmware may not be able to determine which tool is being
+used when it first appears.
 
 ABS_MT_BLOB_ID
 
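
For type B devices the tool type goes through the slot helpers rather than a
raw ABS_MT_TOOL_TYPE event; a hedged sketch (slot and coordinates are
placeholders):

#include <linux/input/mt.h>

static void demo_report_palm(struct input_dev *input, int slot, int x, int y)
{
	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_PALM, true);
	input_report_abs(input, ABS_MT_POSITION_X, x);
	input_report_abs(input, ABS_MT_POSITION_Y, y);
}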

+ 25 - 19
MAINTAINERS

@@ -637,8 +637,7 @@ F:      drivers/gpu/drm/radeon/radeon_kfd.h
 F:      include/uapi/linux/kfd_ioctl.h
 
 AMD MICROCODE UPDATE SUPPORT
-M:	Andreas Herrmann <herrmann.der.user@googlemail.com>
-L:	amd64-microcode@amd64.org
+M:	Borislav Petkov <bp@alien8.de>
 S:	Maintained
 F:	arch/x86/kernel/cpu/microcode/amd*
 
@@ -1186,7 +1185,7 @@ M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-mvebu/
-F:	drivers/rtc/armada38x-rtc
+F:	drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1362,6 +1361,7 @@ F:	drivers/i2c/busses/i2c-rk3x.c
 F:	drivers/*/*rockchip*
 F:	drivers/*/*/*rockchip*
 F:	sound/soc/rockchip/
+N:	rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
@@ -1675,8 +1675,8 @@ F:	drivers/misc/eeprom/at24.c
 F:	include/linux/platform_data/at24.h
 
 ATA OVER ETHERNET (AOE) DRIVER
-M:	"Ed L. Cashin" <ecashin@coraid.com>
-W:	http://support.coraid.com/support/linux
+M:	"Ed L. Cashin" <ed.cashin@acm.org>
+W:	http://www.openaoe.org/
 S:	Supported
 F:	Documentation/aoe/
 F:	drivers/block/aoe/
@@ -3252,6 +3252,13 @@ S:	Maintained
 F:	Documentation/hwmon/dme1737
 F:	drivers/hwmon/dme1737.c
 
+DMI/SMBIOS SUPPORT
+M:	Jean Delvare <jdelvare@suse.de>
+S:	Maintained
+F:	drivers/firmware/dmi-id.c
+F:	drivers/firmware/dmi_scan.c
+F:	include/linux/dmi.h
+
 DOCKING STATION DRIVER
 M:	Shaohua Li <shaohua.li@intel.com>
 L:	linux-acpi@vger.kernel.org
@@ -5087,7 +5094,7 @@ S:	Supported
 F:	drivers/platform/x86/intel_menlow.c
 
 INTEL IA32 MICROCODE UPDATE SUPPORT
-M:	Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+M:	Borislav Petkov <bp@alien8.de>
 S:	Maintained
 F:	arch/x86/kernel/cpu/microcode/core*
 F:	arch/x86/kernel/cpu/microcode/intel*
@@ -5128,22 +5135,21 @@ M:	Deepak Saxena <dsaxena@plexity.net>
 S:	Maintained
 F:	drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
+INTEL ETHERNET DRIVERS
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
-M:	Bruce Allan <bruce.w.allan@intel.com>
-M:	Carolyn Wyborny <carolyn.wyborny@intel.com>
-M:	Don Skidmore <donald.c.skidmore@intel.com>
-M:	Greg Rose <gregory.v.rose@intel.com>
-M:	Matthew Vick <matthew.vick@intel.com>
-M:	John Ronciak <john.ronciak@intel.com>
-M:	Mitch Williams <mitch.a.williams@intel.com>
-M:	Linux NICS <linux.nics@intel.com>
-L:	e1000-devel@lists.sourceforge.net
+R:	Jesse Brandeburg <jesse.brandeburg@intel.com>
+R:	Shannon Nelson <shannon.nelson@intel.com>
+R:	Carolyn Wyborny <carolyn.wyborny@intel.com>
+R:	Don Skidmore <donald.c.skidmore@intel.com>
+R:	Matthew Vick <matthew.vick@intel.com>
+R:	John Ronciak <john.ronciak@intel.com>
+R:	Mitch Williams <mitch.a.williams@intel.com>
+L:	intel-wired-lan@lists.osuosl.org
 W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
+Q:	http://patchwork.ozlabs.org/project/intel-wired-lan/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
 S:	Supported
 F:	Documentation/networking/e100.txt
 F:	Documentation/networking/e1000.txt

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*

+ 18 - 6
arch/arc/kernel/signal.c

@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 	       sigset_t *set)
 {
 	int err;
-	err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	if (!err)
 		set_current_blocked(&set);
 
-	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
 
 	return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);
 
+	/*
+	 * Ensure that sigreturn always returns to user mode (in case the
+	 * regs saved on user stack got fudged between save and sigreturn)
+	 * Otherwise it is easy to panic the kernel with a custom
+	 * signal handler and/or restorer which clobberes the status32/ret
+	 * to return to a bogus location in kernel mode.
+	 */
+	regs->status32 |= STATUS_U_MASK;
+
 	return regs->r0;
 
 badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 
 	/*
 	 * handler returns using sigreturn stub provided already by userpsace
+	 * If not, nuke the process right away
 	 */
-	BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+	if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
+		return 1;
+
 	regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
 
 	/* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
 	sigset_t *oldset = sigmask_to_save();
-	int ret;
+	int failed;
 
 	/* Set up the stack frame */
-	ret = setup_rt_frame(ksig, oldset, regs);
+	failed = setup_rt_frame(ksig, oldset, regs);
 
-	signal_setup_done(ret, ksig, 0);
+	signal_setup_done(failed, ksig, 0);
 }
 
 void do_signal(struct pt_regs *regs)

+ 1 - 0
arch/arm/Kconfig

@@ -619,6 +619,7 @@ config ARCH_PXA
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
 	select HAVE_IDE
+	select IRQ_DOMAIN
 	select MULTI_IRQ_HANDLER
 	select PLAT_PXA
 	select SPARSE_IRQ

+ 19 - 0
arch/arm/boot/dts/dm8168-evm.dts

@@ -36,6 +36,20 @@
 		>;
 	};
 
+	mmc_pins: pinmux_mmc_pins {
+		pinctrl-single,pins = <
+			DM816X_IOPAD(0x0a70, MUX_MODE0)			/* SD_POW */
+			DM816X_IOPAD(0x0a74, MUX_MODE0)			/* SD_CLK */
+			DM816X_IOPAD(0x0a78, MUX_MODE0)			/* SD_CMD */
+			DM816X_IOPAD(0x0a7C, MUX_MODE0)			/* SD_DAT0 */
+			DM816X_IOPAD(0x0a80, MUX_MODE0)			/* SD_DAT1 */
+			DM816X_IOPAD(0x0a84, MUX_MODE0)			/* SD_DAT2 */
+			DM816X_IOPAD(0x0a88, MUX_MODE0)			/* SD_DAT2 */
+			DM816X_IOPAD(0x0a8c, MUX_MODE2)			/* GP1[7] */
+			DM816X_IOPAD(0x0a90, MUX_MODE2)			/* GP1[8] */
+		>;
+	};
+
 	usb0_pins: pinmux_usb0_pins {
 		pinctrl-single,pins = <
 			DM816X_IOPAD(0x0d00, MUX_MODE0)			/* USB0_DRVVBUS */
@@ -137,7 +151,12 @@
 };
 
 &mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc_pins>;
 	vmmc-supply = <&vmmcsd_fixed>;
+	bus-width = <4>;
+	cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
 };
 
 /* At least dm8168-evm rev c won't support multipoint, later may */

+ 14 - 4
arch/arm/boot/dts/dm816x.dtsi

@@ -150,17 +150,27 @@
 		};
 
 		gpio1: gpio@48032000 {
-			compatible = "ti,omap3-gpio";
+			compatible = "ti,omap4-gpio";
 			ti,hwmods = "gpio1";
+			ti,gpio-always-on;
 			reg = <0x48032000 0x1000>;
-			interrupts = <97>;
+			interrupts = <96>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};
 
 		gpio2: gpio@4804c000 {
-			compatible = "ti,omap3-gpio";
+			compatible = "ti,omap4-gpio";
 			ti,hwmods = "gpio2";
+			ti,gpio-always-on;
 			reg = <0x4804c000 0x1000>;
-			interrupts = <99>;
+			interrupts = <98>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};
 
 		gpmc: gpmc@50000000 {

+ 0 - 2
arch/arm/boot/dts/dra7.dtsi

@@ -1111,7 +1111,6 @@
 					      "wkupclk", "refclk",
 					      "div-clk", "phy-div";
 				#phy-cells = <0>;
-				ti,hwmods = "pcie1-phy";
 			};
 
 			pcie2_phy: pciephy@4a095000 {
@@ -1130,7 +1129,6 @@
 					      "wkupclk", "refclk",
 					      "div-clk", "phy-div";
 				#phy-cells = <0>;
-				ti,hwmods = "pcie2-phy";
 				status = "disabled";
 			};
 		};

+ 4 - 0
arch/arm/boot/dts/omap3.dtsi

@@ -92,6 +92,8 @@
 			ti,hwmods = "aes";
 			reg = <0x480c5000 0x50>;
 			interrupts = <0>;
+			dmas = <&sdma 65 &sdma 66>;
+			dma-names = "tx", "rx";
 		};
 
 		prm: prm@48306000 {
@@ -550,6 +552,8 @@
 			ti,hwmods = "sham";
 			reg = <0x480c3000 0x64>;
 			interrupts = <49>;
+			dmas = <&sdma 69>;
+			dma-names = "rx";
 		};
 
 		smartreflex_core: smartreflex@480cb000 {

+ 1 - 0
arch/arm/boot/dts/rk3288.dtsi

@@ -411,6 +411,7 @@
 			"mac_clk_rx", "mac_clk_tx",
 			"clk_mac_ref", "clk_mac_refout",
 			"aclk_mac", "pclk_mac";
+		status = "disabled";
 	};
 
 	usb_host0_ehci: usb@ff500000 {

+ 1 - 1
arch/arm/boot/dts/socfpga.dtsi

@@ -660,7 +660,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0xfff01000 0x1000>;
-			interrupts = <0 156 4>;
+			interrupts = <0 155 4>;
 			num-cs = <4>;
 			clocks = <&spi_m_clk>;
 			status = "disabled";

+ 16 - 0
arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts

@@ -56,6 +56,22 @@
 	model = "Olimex A10-OLinuXino-LIME";
 	compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
 
+	cpus {
+		cpu0: cpu@0 {
+			/*
+			 * The A10-Lime is known to be unstable
+			 * when running at 1008 MHz
+			 */
+			operating-points = <
+				/* kHz    uV */
+				912000  1350000
+				864000  1300000
+				624000  1250000
+				>;
+			cooling-max-level = <2>;
+		};
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";

+ 1 - 2
arch/arm/boot/dts/sun4i-a10.dtsi

@@ -75,7 +75,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz    uV */
-				1056000 1500000
 				1008000 1400000
 				912000  1350000
 				864000  1300000
@@ -83,7 +82,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <4>;
+			cooling-max-level = <3>;
 		};
 	};
 

+ 1 - 2
arch/arm/boot/dts/sun5i-a13.dtsi

@@ -47,7 +47,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz    uV */
-				1104000	1500000
 				1008000 1400000
 				912000  1350000
 				864000  1300000
@@ -57,7 +56,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <6>;
+			cooling-max-level = <5>;
 		};
 	};
 

+ 1 - 2
arch/arm/boot/dts/sun7i-a20.dtsi

@@ -105,7 +105,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz    uV */
-				1008000 1450000
 				960000  1400000
 				912000  1400000
 				864000  1300000
@@ -116,7 +115,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <7>;
+			cooling-max-level = <6>;
 		};
 
 		cpu@1 {

+ 2 - 0
arch/arm/mach-omap2/id.c

@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
 		return kasprintf(GFP_KERNEL, "OMAP4");
 	else if (soc_is_omap54xx())
 		return kasprintf(GFP_KERNEL, "OMAP5");
+	else if (soc_is_am33xx() || soc_is_am335x())
+		return kasprintf(GFP_KERNEL, "AM33xx");
 	else if (soc_is_am43xx())
 		return kasprintf(GFP_KERNEL, "AM43xx");
 	else if (soc_is_dra7xx())

+ 48 - 63
arch/arm/mach-pxa/irq.c

@@ -11,6 +11,7 @@
  *  it under the terms of the GNU General Public License version 2 as
  *  published by the Free Software Foundation.
  */
+#include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -40,7 +41,6 @@
 #define ICHP_VAL_IRQ		(1 << 31)
 #define ICHP_IRQ(i)		(((i) >> 16) & 0x7fff)
 #define IPR_VALID		(1 << 31)
-#define IRQ_BIT(n)		(((n) - PXA_IRQ(0)) & 0x1f)
 
 #define MAX_INTERNAL_IRQS	128
 
@@ -51,6 +51,7 @@
 static void __iomem *pxa_irq_base;
 static int pxa_internal_irq_nr;
 static bool cpu_has_ipr;
+static struct irq_domain *pxa_irq_domain;
 
 static inline void __iomem *irq_base(int i)
 {
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
 void pxa_mask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr &= ~(1 << IRQ_BIT(d->irq));
+	icmr &= ~BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }
 
 void pxa_unmask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);
 
-	icmr |= 1 << IRQ_BIT(d->irq);
+	icmr |= BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }
 
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
 	} while (1);
 }
 
-void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+		       irq_hw_number_t hw)
 {
-	int irq, i, n;
+	void __iomem *base = irq_base(hw / 32);
 
-	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+	/* initialize interrupt priority */
+	if (cpu_has_ipr)
+		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+	irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(virq, base);
+	set_irq_flags(virq, IRQF_VALID);
+
+	return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+	.map    = pxa_irq_map,
+	.xlate  = irq_domain_xlate_onecell,
+};
+
+static __init void
+pxa_init_irq_common(struct device_node *node, int irq_nr,
+		    int (*fn)(struct irq_data *, unsigned int))
+{
+	int n;
 
 	pxa_internal_irq_nr = irq_nr;
-	cpu_has_ipr = !cpu_is_pxa25x();
-	pxa_irq_base = io_p2v(0x40d00000);
+	pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
+					       PXA_IRQ(0), 0,
+					       &pxa_irq_ops, NULL);
+	if (!pxa_irq_domain)
+		panic("Unable to add PXA IRQ domain\n");
+	irq_set_default_host(pxa_irq_domain);
 
 	for (n = 0; n < irq_nr; n += 32) {
 		void __iomem *base = irq_base(n >> 5);
 
 		__raw_writel(0, base + ICMR);	/* disable all IRQs */
 		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
-		for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
-			/* initialize interrupt priority */
-			if (cpu_has_ipr)
-				__raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
-
-			irq = PXA_IRQ(i);
-			irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
-						 handle_level_irq);
-			irq_set_chip_data(irq, base);
-			set_irq_flags(irq, IRQF_VALID);
-		}
 	}
-
 	/* only unmasked interrupts kick us out of idle */
 	__raw_writel(1, irq_base(0) + ICCR);
 
 	pxa_internal_irq_chip.irq_set_wake = fn;
 }
 
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+{
+	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+
+	pxa_irq_base = io_p2v(0x40d00000);
+	cpu_has_ipr = !cpu_is_pxa25x();
+	pxa_init_irq_common(NULL, irq_nr, fn);
+}
+
 #ifdef CONFIG_PM
 static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
 static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
 };
 
 #ifdef CONFIG_OF
-static struct irq_domain *pxa_irq_domain;
-
-static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
-		       irq_hw_number_t hw)
-{
-	void __iomem *base = irq_base(hw / 32);
-
-	/* initialize interrupt priority */
-	if (cpu_has_ipr)
-		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
-
-	irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
-				 handle_level_irq);
-	irq_set_chip_data(hw, base);
-	set_irq_flags(hw, IRQF_VALID);
-
-	return 0;
-}
-
-static struct irq_domain_ops pxa_irq_ops = {
-	.map    = pxa_irq_map,
-	.xlate  = irq_domain_xlate_onecell,
-};
-
 static const struct of_device_id intc_ids[] __initconst = {
 	{ .compatible = "marvell,pxa-intc", },
 	{}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 {
 	struct device_node *node;
 	struct resource res;
-	int n, ret;
+	int ret;
 
 	node = of_find_matching_node(NULL, intc_ids);
 	if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 		return;
 	}
 
-	pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
-					       &pxa_irq_ops, NULL);
-	if (!pxa_irq_domain)
-		panic("Unable to add PXA IRQ domain\n");
-
-	irq_set_default_host(pxa_irq_domain);
-
-	for (n = 0; n < pxa_internal_irq_nr; n += 32) {
-		void __iomem *base = irq_base(n >> 5);
-
-		__raw_writel(0, base + ICMR);	/* disable all IRQs */
-		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
-	}
-
-	/* only unmasked interrupts kick us out of idle */
-	__raw_writel(1, irq_base(0) + ICCR);
-
-	pxa_internal_irq_chip.irq_set_wake = fn;
+	pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
 }
 #endif /* CONFIG_OF */

+ 1 - 1
arch/arm/mach-pxa/zeus.c

@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
 };
 
 static struct platform_device can_regulator_device = {
-	.name	= "reg-fixed-volage",
+	.name	= "reg-fixed-voltage",
 	.id	= 0,
 	.dev	= {
 		.platform_data	= &can_regulator_pdata,

+ 2 - 6
arch/arm/mach-sunxi/Kconfig

@@ -1,10 +1,12 @@
 menuconfig ARCH_SUNXI
 	bool "Allwinner SoCs" if ARCH_MULTI_V7
 	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_HAS_RESET_CONTROLLER
 	select CLKSRC_MMIO
 	select GENERIC_IRQ_CHIP
 	select PINCTRL
 	select SUN4I_TIMER
+	select RESET_CONTROLLER
 
 if ARCH_SUNXI
 
@@ -20,10 +22,8 @@ config MACH_SUN5I
 config MACH_SUN6I
 	bool "Allwinner A31 (sun6i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
 	select MFD_SUN6I_PRCM
-	select RESET_CONTROLLER
 	select SUN5I_HSTIMER
 
 config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
 config MACH_SUN8I
 	bool "Allwinner A23 (sun8i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
 	select MFD_SUN6I_PRCM
-	select RESET_CONTROLLER
 
 config MACH_SUN9I
 	bool "Allwinner (sun9i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
-	select RESET_CONTROLLER
 
 endif

+ 14 - 1
arch/arm/plat-omap/dmtimer.c

@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	const struct of_device_id *match;
 	const struct dmtimer_platform_data *pdata;
+	int ret;
 
 	match = of_match_device(of_match_ptr(omap_timer_match), dev);
 	pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	}
 
 	if (!timer->reserved) {
-		pm_runtime_get_sync(dev);
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+				__func__);
+			goto err_get_sync;
+		}
 		__omap_dm_timer_init_regs(timer);
 		pm_runtime_put(dev);
 	}
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	dev_dbg(dev, "Device Probed.\n");
 
 	return 0;
+
+err_get_sync:
+	pm_runtime_put_noidle(dev);
+	pm_runtime_disable(dev);
+	return ret;
 }
 
 /**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
 		}
 	spin_unlock_irqrestore(&dm_timer_lock, flags);
 
+	pm_runtime_disable(&pdev->dev);
+
 	return ret;
 }
 
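
The probe fix above follows the usual runtime-PM error idiom: a failed
pm_runtime_get_sync() still leaves the usage count raised, so the error path
must drop it with pm_runtime_put_noidle() before disabling runtime PM. A
hedged, generic sketch of that idiom (driver names invented):

#include <linux/pm_runtime.h>

static int demo_probe(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		pm_runtime_disable(dev);
		return ret;
	}
	/* ... access hardware registers here ... */
	pm_runtime_put(dev);
	return 0;
}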

+ 1 - 1
arch/arm64/boot/dts/arm/juno-clocks.dtsi

@@ -8,7 +8,7 @@
  */
 
 	/* SoC fixed clocks */
-	soc_uartclk: refclk72738khz {
+	soc_uartclk: refclk7273800hz {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
 		clock-frequency = <7273800>;

+ 24 - 8
arch/arm64/include/asm/cmpxchg.h

@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	__ret; \
 })
 
-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-	cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-				o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n)			\
+({								\
+	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
+	preempt_disable();					\
+	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
+	preempt_enable();					\
+	__ret;							\
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
+({									\
+	int __ret;							\
+	preempt_disable();						\
+	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
+					raw_cpu_ptr(&(ptr2)),		\
+					o1, o2, n1, n2);		\
+	preempt_enable();						\
+	__ret;								\
+})
 
 #define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
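
The point of the new wrappers is that raw_cpu_ptr() and the cmpxchg must run
on the same CPU; with preemption enabled the task could migrate between the
two steps. A hedged restatement of the pattern (macro name invented for
illustration):

/* Sketch: pin the task to one CPU around the whole pointer+cmpxchg pair. */
#define demo_protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();	/* no migration from here... */	\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();	/* ...to here */		\
	__ret;							\
})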

+ 9 - 0
arch/arm64/include/asm/mmu_context.h

@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	unsigned int cpu = smp_processor_id();
 
+	/*
+	 * init_mm.pgd does not contain any user mappings and it is always
+	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
+	 */
+	if (next == &init_mm) {
+		cpu_set_reserved_ttbr0();
+		return;
+	}
+
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
 		check_and_switch_context(next, tsk);
 }

+ 33 - 11
arch/arm64/include/asm/percpu.h

@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 	return ret;
 }
 
+#define _percpu_read(pcp)						\
+({									\
+	typeof(pcp) __retval;						\
+	preempt_disable();						\
+	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), 	\
+					      sizeof(pcp));		\
+	preempt_enable();						\
+	__retval;							\
+})
+
+#define _percpu_write(pcp, val)						\
+do {									\
+	preempt_disable();						\
+	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), 	\
+				sizeof(pcp));				\
+	preempt_enable();						\
+} while(0)								\
+
+#define _pcp_protect(operation, pcp, val)			\
+({								\
+	typeof(pcp) __retval;					\
+	preempt_disable();					\
+	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),	\
+					  (val), sizeof(pcp));	\
+	preempt_enable();					\
+	__retval;						\
+})
+
 #define _percpu_add(pcp, val) \
-	__percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+	_pcp_protect(__percpu_add, pcp, val)
 
-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
 
 #define _percpu_and(pcp, val) \
-	__percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+	_pcp_protect(__percpu_and, pcp, val)
 
 #define _percpu_or(pcp, val) \
-	__percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp))	\
-	(__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
-	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+	_pcp_protect(__percpu_or, pcp, val)
 
 #define _percpu_xchg(pcp, val) (typeof(pcp)) \
-	(__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
 
 #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
 #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)

+ 1 - 0
arch/metag/include/asm/io.h

@@ -2,6 +2,7 @@
 #define _ASM_METAG_IO_H
 
 #include <linux/types.h>
+#include <asm/pgtable-bits.h>
 
 #define IO_SPACE_LIMIT  0
 

+ 104 - 0
arch/metag/include/asm/pgtable-bits.h

@@ -0,0 +1,104 @@
+/*
+ * Meta page table definitions.
+ */
+
+#ifndef _METAG_PGTABLE_BITS_H
+#define _METAG_PGTABLE_BITS_H
+
+#include <asm/metag_mem.h>
+
+/*
+ * Definitions for MMU descriptors
+ *
+ * These are the hardware bits in the MMCU pte entries.
+ * Derived from the Meta toolkit headers.
+ */
+#define _PAGE_PRESENT		MMCU_ENTRY_VAL_BIT
+#define _PAGE_WRITE		MMCU_ENTRY_WR_BIT
+#define _PAGE_PRIV		MMCU_ENTRY_PRIV_BIT
+/* Write combine bit - this can cause writes to occur out of order */
+#define _PAGE_WR_COMBINE	MMCU_ENTRY_WRC_BIT
+/* Sys coherent bit - this bit is never used by Linux */
+#define _PAGE_SYS_COHERENT	MMCU_ENTRY_SYS_BIT
+#define _PAGE_ALWAYS_ZERO_1	0x020
+#define _PAGE_CACHE_CTRL0	0x040
+#define _PAGE_CACHE_CTRL1	0x080
+#define _PAGE_ALWAYS_ZERO_2	0x100
+#define _PAGE_ALWAYS_ZERO_3	0x200
+#define _PAGE_ALWAYS_ZERO_4	0x400
+#define _PAGE_ALWAYS_ZERO_5	0x800
+
+/* These are software bits that we stuff into the gaps in the hardware
+ * pte entries that are not used.  Note, these DO get stored in the actual
+ * hardware, but the hardware just does not use them.
+ */
+#define _PAGE_ACCESSED		_PAGE_ALWAYS_ZERO_1
+#define _PAGE_DIRTY		_PAGE_ALWAYS_ZERO_2
+
+/* Pages owned, and protected by, the kernel. */
+#define _PAGE_KERNEL		_PAGE_PRIV
+
+/* No cacheing of this page */
+#define _PAGE_CACHE_WIN0	(MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
+/* burst cacheing - good for data streaming */
+#define _PAGE_CACHE_WIN1	(MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
+/* One cache way per thread */
+#define _PAGE_CACHE_WIN2	(MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
+/* Full on cacheing */
+#define _PAGE_CACHE_WIN3	(MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
+
+#define _PAGE_CACHEABLE		(_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
+
+/* which bits are used for cache control ... */
+#define _PAGE_CACHE_MASK	(_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
+				 _PAGE_WR_COMBINE)
+
+/* This is a mask of the bits that pte_modify is allowed to change. */
+#define _PAGE_CHG_MASK		(PAGE_MASK)
+
+#define _PAGE_SZ_SHIFT		1
+#define _PAGE_SZ_4K		(0x0)
+#define _PAGE_SZ_8K		(0x1 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_16K		(0x2 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_32K		(0x3 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_64K		(0x4 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_128K		(0x5 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_256K		(0x6 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_512K		(0x7 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_1M		(0x8 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_2M		(0x9 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_4M		(0xa << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_MASK		(0xf << _PAGE_SZ_SHIFT)
+
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define _PAGE_SZ		(_PAGE_SZ_4K)
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define _PAGE_SZ		(_PAGE_SZ_8K)
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define _PAGE_SZ		(_PAGE_SZ_16K)
+#endif
+#define _PAGE_TABLE		(_PAGE_SZ | _PAGE_PRESENT)
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_8K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_16K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_32K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_64K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_128K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_256K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_512K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define _PAGE_SZHUGE		(_PAGE_SZ_1M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define _PAGE_SZHUGE		(_PAGE_SZ_2M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define _PAGE_SZHUGE		(_PAGE_SZ_4M)
+#endif
+
+#endif /* _METAG_PGTABLE_BITS_H */

+ 1 - 94
arch/metag/include/asm/pgtable.h

@@ -5,6 +5,7 @@
 #ifndef _METAG_PGTABLE_H
 #define _METAG_PGTABLE_H
 
+#include <asm/pgtable-bits.h>
 #include <asm-generic/pgtable-nopmd.h>
 
 /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@@ -20,100 +21,6 @@
 #define VMALLOC_END		0x7FFFFFFF
 #endif
 
-/*
- * Definitions for MMU descriptors
- *
- * These are the hardware bits in the MMCU pte entries.
- * Derived from the Meta toolkit headers.
- */
-#define _PAGE_PRESENT		MMCU_ENTRY_VAL_BIT
-#define _PAGE_WRITE		MMCU_ENTRY_WR_BIT
-#define _PAGE_PRIV		MMCU_ENTRY_PRIV_BIT
-/* Write combine bit - this can cause writes to occur out of order */
-#define _PAGE_WR_COMBINE	MMCU_ENTRY_WRC_BIT
-/* Sys coherent bit - this bit is never used by Linux */
-#define _PAGE_SYS_COHERENT	MMCU_ENTRY_SYS_BIT
-#define _PAGE_ALWAYS_ZERO_1	0x020
-#define _PAGE_CACHE_CTRL0	0x040
-#define _PAGE_CACHE_CTRL1	0x080
-#define _PAGE_ALWAYS_ZERO_2	0x100
-#define _PAGE_ALWAYS_ZERO_3	0x200
-#define _PAGE_ALWAYS_ZERO_4	0x400
-#define _PAGE_ALWAYS_ZERO_5	0x800
-
-/* These are software bits that we stuff into the gaps in the hardware
- * pte entries that are not used.  Note, these DO get stored in the actual
- * hardware, but the hardware just does not use them.
- */
-#define _PAGE_ACCESSED		_PAGE_ALWAYS_ZERO_1
-#define _PAGE_DIRTY		_PAGE_ALWAYS_ZERO_2
-
-/* Pages owned, and protected by, the kernel. */
-#define _PAGE_KERNEL		_PAGE_PRIV
-
-/* No cacheing of this page */
-#define _PAGE_CACHE_WIN0	(MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
-/* burst cacheing - good for data streaming */
-#define _PAGE_CACHE_WIN1	(MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
-/* One cache way per thread */
-#define _PAGE_CACHE_WIN2	(MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
-/* Full on cacheing */
-#define _PAGE_CACHE_WIN3	(MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
-
-#define _PAGE_CACHEABLE		(_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
-
-/* which bits are used for cache control ... */
-#define _PAGE_CACHE_MASK	(_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
-				 _PAGE_WR_COMBINE)
-
-/* This is a mask of the bits that pte_modify is allowed to change. */
-#define _PAGE_CHG_MASK		(PAGE_MASK)
-
-#define _PAGE_SZ_SHIFT		1
-#define _PAGE_SZ_4K		(0x0)
-#define _PAGE_SZ_8K		(0x1 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_16K		(0x2 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_32K		(0x3 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_64K		(0x4 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_128K		(0x5 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_256K		(0x6 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_512K		(0x7 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_1M		(0x8 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_2M		(0x9 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_4M		(0xa << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_MASK		(0xf << _PAGE_SZ_SHIFT)
-
-#if defined(CONFIG_PAGE_SIZE_4K)
-#define _PAGE_SZ		(_PAGE_SZ_4K)
-#elif defined(CONFIG_PAGE_SIZE_8K)
-#define _PAGE_SZ		(_PAGE_SZ_8K)
-#elif defined(CONFIG_PAGE_SIZE_16K)
-#define _PAGE_SZ		(_PAGE_SZ_16K)
-#endif
-#define _PAGE_TABLE		(_PAGE_SZ | _PAGE_PRESENT)
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_8K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_16K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_32K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_64K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_128K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_256K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_512K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
-# define _PAGE_SZHUGE		(_PAGE_SZ_1M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
-# define _PAGE_SZHUGE		(_PAGE_SZ_2M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
-# define _PAGE_SZHUGE		(_PAGE_SZ_4M)
-#endif
-
 /*
  * The Linux memory management assumes a three-level page table setup. On
  * Meta, we use that, but "fold" the mid level into the top-level page

+ 10 - 7
arch/parisc/include/asm/pgalloc.h

@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	if (likely(pgd != NULL)) {
 		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 		actual_pgd += PTRS_PER_PGD;
 		/* Populate first pmd with allocated memory.  We mark it
 		 * with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	pgd -= PTRS_PER_PGD;
 #endif
 	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-#ifdef CONFIG_64BIT
 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		/* This is the permanent pmd attached to the pgd;
-		 * cannot free it */
+		/*
+		 * This is the permanent pmd attached to the pgd;
+		 * cannot free it.
+		 * Increment the counter to compensate for the decrement
+		 * done by generic mm code.
+		 */
+		mm_inc_nr_pmds(mm);
 		return;
-#endif
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	/* preserve the gateway marker if this is the beginning of
 	 * the permanent pmd */
 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)

+ 6 - 3
arch/parisc/kernel/syscall_table.S

@@ -55,8 +55,8 @@
 #define ENTRY_COMP(_name_) .word sys_##_name_
 #endif
 
-	ENTRY_SAME(restart_syscall)	/* 0 */
-	ENTRY_SAME(exit)
+90:	ENTRY_SAME(restart_syscall)	/* 0 */
+91:	ENTRY_SAME(exit)
 	ENTRY_SAME(fork_wrapper)
 	ENTRY_SAME(read)
 	ENTRY_SAME(write)
@@ -439,7 +439,10 @@
 	ENTRY_SAME(bpf)
 	ENTRY_COMP(execveat)
 
-	/* Nothing yet */
+
+.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+.endif
 
 #undef ENTRY_SAME
 #undef ENTRY_DIFF
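
For reference, the 90:/91: labels measure the size of one table entry, and
the .ifne directive fails the build when the number of emitted entries
disagrees with __NR_Linux_syscalls. A hedged C analogue of the same
build-time guard, with all names and the count invented for illustration:

/* C sketch of the size check; "demo_" names are placeholders. */
static long demo_restart(void) { return 0; }
static long demo_exit(void)    { return 0; }
static long demo_fork(void)    { return 0; }

#define DEMO_NR_SYSCALLS 3

static long (*const demo_table[])(void) = {
	demo_restart, demo_exit, demo_fork,
};

/* Fails at compile time if an entry is added or removed without updating
 * DEMO_NR_SYSCALLS, just like the .ifne/.error pair above. */
_Static_assert(sizeof(demo_table) / sizeof(demo_table[0]) == DEMO_NR_SYSCALLS,
	       "size of syscall table does not fit value of DEMO_NR_SYSCALLS");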

+ 1 - 1
arch/powerpc/include/asm/cputhreads.h

@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
 
 static inline int cpu_nr_cores(void)
 {
-	return NR_CPUS >> threads_shift;
+	return nr_cpu_ids >> threads_shift;
 }
 
 static inline cpumask_t cpu_online_cores_map(void)

+ 3 - 0
arch/powerpc/include/asm/ppc-opcode.h

@@ -153,6 +153,7 @@
 #define PPC_INST_MFSPR_PVR_MASK		0xfc1fffff
 #define PPC_INST_MFTMR			0x7c0002dc
 #define PPC_INST_MSGSND			0x7c00019c
+#define PPC_INST_MSGCLR			0x7c0001dc
 #define PPC_INST_MSGSNDP		0x7c00011c
 #define PPC_INST_MTTMR			0x7c0003dc
 #define PPC_INST_NOP			0x60000000
@@ -309,6 +310,8 @@
 					___PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)		stringify_in_c(.long PPC_INST_MSGSND | \
 					___PPC_RB(b))
+#define PPC_MSGCLR(b)		stringify_in_c(.long PPC_INST_MSGCLR | \
+					___PPC_RB(b))
 #define PPC_MSGSNDP(b)		stringify_in_c(.long PPC_INST_MSGSNDP | \
 					___PPC_RB(b))
 #define PPC_POPCNTB(a, s)	stringify_in_c(.long PPC_INST_POPCNTB | \

+ 3 - 0
arch/powerpc/include/asm/reg.h

@@ -608,13 +608,16 @@
 #define   SRR1_ISI_N_OR_G	0x10000000 /* ISI: Access is no-exec or G */
 #define   SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
 #define   SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
+#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 */
 #define   SRR1_WAKESYSERR	0x00300000 /* System error */
 #define   SRR1_WAKEEE		0x00200000 /* External interrupt */
 #define   SRR1_WAKEMT		0x00280000 /* mtctrl */
 #define	  SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
 #define   SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */
+#define   SRR1_WAKEDBELL	0x00140000 /* Privileged doorbell on P8 */
 #define   SRR1_WAKETHERM	0x00100000 /* Thermal management interrupt */
 #define	  SRR1_WAKERESET	0x00100000 /* System reset */
+#define   SRR1_WAKEHDBELL	0x000c0000 /* Hypervisor doorbell on P8 */
 #define	  SRR1_WAKESTATE	0x00030000 /* Powersave exit mask [46:47] */
 #define	  SRR1_WS_DEEPEST	0x00030000 /* Some resources not maintained,
 					  * may not be recoverable */
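
A hedged sketch of how the new definitions might be used on the wakeup path
(the helper name is invented; real decoding lives in the platform idle/SMP
code):

/* Classify a POWER8 wakeup reason from SRR1 using the wider P8 mask. */
static inline int demo_srr1_wake_is_doorbell(unsigned long srr1)
{
	unsigned long reason = srr1 & SRR1_WAKEMASK_P8;

	/* privileged or hypervisor doorbell */
	return reason == SRR1_WAKEDBELL || reason == SRR1_WAKEHDBELL;
}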

+ 20 - 0
arch/powerpc/kernel/cputable.c

@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check_early	= __machine_check_early_realmode_p8,
 		.platform		= "power8",
 	},
+	{	/* Power8NVL */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x004c0000,
+		.cpu_name		= "POWER8NVL (raw)",
+		.cpu_features		= CPU_FTRS_POWER8,
+		.cpu_user_features	= COMMON_USER_POWER8,
+		.cpu_user_features2	= COMMON_USER2_POWER8,
+		.mmu_features		= MMU_FTRS_POWER8,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_type		= PPC_OPROFILE_INVALID,
+		.cpu_setup		= __setup_cpu_power8,
+		.cpu_restore		= __restore_cpu_power8,
+		.flush_tlb		= __flush_tlb_power8,
+		.machine_check_early	= __machine_check_early_realmode_p8,
+		.platform		= "power8",
+	},
 	{	/* Power8 DD1: Does not support doorbell IPIs */
 		.pvr_mask		= 0xffffff00,
 		.pvr_value		= 0x004d0100,
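
For context, cpu_specs entries are matched by masking the processor version
register; a hedged sketch of the comparison (helper name invented):

/* The new entry is selected when (PVR & 0xffff0000) == 0x004c0000. */
static int demo_cpu_spec_matches(unsigned int pvr,
				 unsigned int pvr_mask, unsigned int pvr_value)
{
	return (pvr & pvr_mask) == pvr_value;
}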

+ 2 - 0
arch/powerpc/kernel/dbell.c

@@ -17,6 +17,7 @@
 
 
 #include <asm/dbell.h>
 #include <asm/dbell.h>
 #include <asm/irq_regs.h>
 #include <asm/irq_regs.h>
+#include <asm/kvm_ppc.h>
 
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_SMP
 void doorbell_setup_this_cpu(void)
 void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
 
 	may_hard_irq_enable();
 
+	kvmppc_set_host_ipi(smp_processor_id(), 0);
 	__this_cpu_inc(irq_stat.doorbell_irqs);
 
 	smp_ipi_demux();

+ 1 - 1
arch/powerpc/kernel/exceptions-64s.S

@@ -1408,7 +1408,7 @@ machine_check_handle_early:
 	bne	9f			/* continue in V mode if we are. */
 
 5:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 	/*
 	 * We are coming from kernel context. Check if we are coming from
 	 * guest. if yes, then we can continue. We will fall through

+ 4 - 4
arch/powerpc/kvm/book3s_hv.c

@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
 	if (lppaca)
-		yield_count = lppaca->yield_count;
+		yield_count = be32_to_cpu(lppaca->yield_count);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 	return yield_count;
 }
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 		bool preserve_top32)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 mask;
 
+	mutex_lock(&kvm->lock);
 	spin_lock(&vc->lock);
 	/*
 	 * If ILE (interrupt little-endian) has changed, update the
 	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
 	 */
 	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
-		struct kvm *kvm = vcpu->kvm;
 		struct kvm_vcpu *vcpu;
 		int i;
 
-		mutex_lock(&kvm->lock);
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->arch.vcore != vc)
 				continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 			else
 				vcpu->arch.intr_msr &= ~MSR_LE;
 		}
-		mutex_unlock(&kvm->lock);
 	}
 
 	/*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 		mask &= 0xFFFFFFFF;
 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
 	spin_unlock(&vc->lock);
+	mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,

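Widening the kvm->lock critical section to the whole function is not just a scope change: kvm->lock is a sleeping mutex, so it must be taken before, never inside, the vc->lock spinlock. A minimal sketch of the resulting ordering, using the same two locks:

	mutex_lock(&kvm->lock);		/* sleeping lock first */
	spin_lock(&vc->lock);		/* non-sleeping lock second */
	/* ... update vc->lpcr and any affected vcpus ... */
	spin_unlock(&vc->lock);
	mutex_unlock(&kvm->lock);	/* release in reverse order */
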
+ 1 - 0
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* Save HEIR (HV emulation assist reg) in emul_inst
 	   if this is an HEI (HV emulation interrupt, e40) */
 	li	r3,KVM_INST_FETCH_FAILED
+	stw	r3,VCPU_LAST_INST(r9)
 	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
 	bne	11f
 	mfspr	r3,SPRN_HEIR

+ 12 - 2
arch/powerpc/platforms/powernv/smp.c

@@ -33,6 +33,8 @@
 #include <asm/runlatch.h>
 #include <asm/code-patching.h>
 #include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
 
 #include "powernv.h"
 
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
 static void pnv_smp_cpu_kill_self(void)
 {
 	unsigned int cpu;
-	unsigned long srr1;
+	unsigned long srr1, wmask;
 	u32 idle_states;
 
 	/* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
 	generic_set_cpu_dead(cpu);
 	smp_wmb();
 
+	wmask = SRR1_WAKEMASK;
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		wmask = SRR1_WAKEMASK_P8;
+
 	idle_states = pnv_get_supported_cpuidle_states();
 	/* We don't want to take decrementer interrupts while we are offline,
 	 * so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
 		 * having finished executing in a KVM guest, then srr1
 		 * contains 0.
 		 */
-		if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+		if ((srr1 & wmask) == SRR1_WAKEEE) {
 			icp_native_flush_interrupt();
 			local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
 			smp_mb();
+		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
+			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
+			kvmppc_set_host_ipi(cpu, 0);
 		}
 
 		if (cpu_core_split_required())

+ 23 - 21
arch/powerpc/platforms/pseries/mobility.c

@@ -25,10 +25,10 @@
 static struct kobject *mobility_kobj;
 
 struct update_props_workarea {
-	u32 phandle;
-	u32 state;
-	u64 reserved;
-	u32 nprops;
+	__be32 phandle;
+	__be32 state;
+	__be64 reserved;
+	__be32 nprops;
 } __packed;
 
 #define NODE_ACTION_MASK	0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
 	return rc;
 }
 
-static int delete_dt_node(u32 phandle)
+static int delete_dt_node(__be32 phandle)
 {
 	struct device_node *dn;
 
-	dn = of_find_node_by_phandle(phandle);
+	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
 	if (!dn)
 		return -ENOENT;
 
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
 	return 0;
 }
 
-static int update_dt_node(u32 phandle, s32 scope)
+static int update_dt_node(__be32 phandle, s32 scope)
 {
 	struct update_props_workarea *upwa;
 	struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
 	char *prop_data;
 	char *rtas_buf;
 	int update_properties_token;
+	u32 nprops;
 	u32 vd;
 
 	update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
 	if (!rtas_buf)
 		return -ENOMEM;
 
-	dn = of_find_node_by_phandle(phandle);
+	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
 	if (!dn) {
 		kfree(rtas_buf);
 		return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
 			break;
 
 		prop_data = rtas_buf + sizeof(*upwa);
+		nprops = be32_to_cpu(upwa->nprops);
 
 		/* On the first call to ibm,update-properties for a node the
 		 * the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
 		 */
 		if (*prop_data == 0) {
 			prop_data++;
-			vd = *(u32 *)prop_data;
+			vd = be32_to_cpu(*(__be32 *)prop_data);
 			prop_data += vd + sizeof(vd);
-			upwa->nprops--;
+			nprops--;
 		}
 
-		for (i = 0; i < upwa->nprops; i++) {
+		for (i = 0; i < nprops; i++) {
 			char *prop_name;
 
 			prop_name = prop_data;
 			prop_data += strlen(prop_name) + 1;
-			vd = *(u32 *)prop_data;
+			vd = be32_to_cpu(*(__be32 *)prop_data);
 			prop_data += sizeof(vd);
 
 			switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
 	return 0;
 }
 
-static int add_dt_node(u32 parent_phandle, u32 drc_index)
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
 {
 	struct device_node *dn;
 	struct device_node *parent_dn;
 	int rc;
 
-	parent_dn = of_find_node_by_phandle(parent_phandle);
+	parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
 	if (!parent_dn)
 		return -ENOENT;
 
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
 int pseries_devicetree_update(s32 scope)
 {
 	char *rtas_buf;
-	u32 *data;
+	__be32 *data;
 	int update_nodes_token;
 	int rc;
 
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
 		if (rc && rc != 1)
 			break;
 
-		data = (u32 *)rtas_buf + 4;
-		while (*data & NODE_ACTION_MASK) {
+		data = (__be32 *)rtas_buf + 4;
+		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
 			int i;
-			u32 action = *data & NODE_ACTION_MASK;
-			int node_count = *data & NODE_COUNT_MASK;
+			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
 
 			data++;
 
 			for (i = 0; i < node_count; i++) {
-				u32 phandle = *data++;
-				u32 drc_index;
+				__be32 phandle = *data++;
+				__be32 drc_index;
 
 				switch (action) {
 				case DELETE_DT_NODE:

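The whole mobility.c hunk follows one rule: the RTAS work area is big-endian, so every field is read through be32_to_cpu() into a CPU-endian local (nprops, vd, action) instead of being used, or worse decremented, in place. A sketch of that parsing style, with a hypothetical buffer layout:

	__be32 *p = (__be32 *)rtas_buf;		/* raw firmware data stays __be32 */
	u32 nprops = be32_to_cpu(*p++);		/* CPU-endian working copy */

	while (nprops--) {
		u32 vd = be32_to_cpu(*p++);	/* convert on read, never write back */
		p += DIV_ROUND_UP(vd, sizeof(*p));	/* skip vd bytes of value data */
	}
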
+ 1 - 1
arch/s390/include/asm/elf.h

@@ -211,7 +211,7 @@ do {								\
 
 extern unsigned long mmap_rnd_mask;
 
-#define STACK_RND_MASK	(mmap_rnd_mask)
+#define STACK_RND_MASK	(test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
 
 #define ARCH_DLINFO							    \
 do {									    \

+ 45 - 16
arch/s390/kernel/ftrace.c

@@ -57,6 +57,44 @@
 
 unsigned long ftrace_plt;
 
+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+	/* brcl 0,0 */
+	insn->opc = 0xc004;
+	insn->disp = 0;
+#else
+	/* stg r14,8(r15) */
+	insn->opc = 0xe3e0;
+	insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+	if (insn->opc == BREAKPOINT_INSTRUCTION)
+		return 1;
+#endif
+	return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+	insn->opc = BREAKPOINT_INSTRUCTION;
+	insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+	insn->opc = BREAKPOINT_INSTRUCTION;
+	insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 		       unsigned long addr)
 {
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		return -EFAULT;
 	if (addr == MCOUNT_ADDR) {
 		/* Initial code replacement */
-#ifdef CC_USING_HOTPATCH
-		/* We expect to see brcl 0,0 */
-		ftrace_generate_nop_insn(&orig);
-#else
-		/* We expect to see stg r14,8(r15) */
-		orig.opc = 0xe3e0;
-		orig.disp = 0xf0080024;
-#endif
+		ftrace_generate_orig_insn(&orig);
 		ftrace_generate_nop_insn(&new);
-	} else if (old.opc == BREAKPOINT_INSTRUCTION) {
+	} else if (is_kprobe_on_ftrace(&old)) {
 		/*
 		 * If we find a breakpoint instruction, a kprobe has been
 		 * placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		 * bytes of the original instruction so that the kprobes
 		 * handler can execute a nop, if it reaches this breakpoint.
 		 */
-		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-		orig.disp = KPROBE_ON_FTRACE_CALL;
-		new.disp = KPROBE_ON_FTRACE_NOP;
+		ftrace_generate_kprobe_call_insn(&orig);
+		ftrace_generate_kprobe_nop_insn(&new);
 	} else {
 		/* Replace ftrace call with a nop. */
 		ftrace_generate_call_insn(&orig, rec->ip);
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	if (old.opc == BREAKPOINT_INSTRUCTION) {
+	if (is_kprobe_on_ftrace(&old)) {
 		/*
 		 * If we find a breakpoint instruction, a kprobe has been
 		 * placed at the beginning of the function. We write the
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * bytes of the original instruction so that the kprobes
 		 * handler can execute a brasl if it reaches this breakpoint.
 		 */
-		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-		orig.disp = KPROBE_ON_FTRACE_NOP;
-		new.disp = KPROBE_ON_FTRACE_CALL;
+		ftrace_generate_kprobe_nop_insn(&orig);
+		ftrace_generate_kprobe_call_insn(&new);
 	} else {
 		/* Replace nop with an ftrace call. */
 		ftrace_generate_nop_insn(&orig);

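The refactor moves every #ifdef CC_USING_HOTPATCH / CONFIG_KPROBES block into static inline helpers, so ftrace_make_nop() and ftrace_make_call() read as straight-line logic. The same pattern in miniature, as a sketch rather than the kernel code:

	static inline int is_kprobe_insn(struct ftrace_insn *insn)
	{
	#ifdef CONFIG_KPROBES
		return insn->opc == BREAKPOINT_INSTRUCTION;
	#else
		return 0;	/* constant-folds away when kprobes are off */
	#endif
	}
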
+ 5 - 2
arch/s390/kernel/perf_cpum_sf.c

@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
 static struct attribute *cpumsf_pmu_events_attr[] = {
 	CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
-	CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+	NULL,
 	NULL,
 };
 
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
 		return -EINVAL;
 	}
 
-	if (si.ad)
+	if (si.ad) {
 		sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+		cpumsf_pmu_events_attr[1] =
+			CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+	}
 
 	sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
 	if (!sfdbg)

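The attribute table is declared with a spare NULL slot so that init code can publish the diagnostic event only when the sampling facility reports support (si.ad); the final NULL stays in place as the array terminator either way. The idiom, sketched with hypothetical names:

	static struct attribute *events_attr[] = {
		&base_event.attr,
		NULL,			/* filled in at init if supported */
		NULL,			/* terminator */
	};

	if (hw_supports_diag())		/* e.g. the si.ad check above */
		events_attr[1] = &diag_event.attr;
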
+ 11 - 0
arch/s390/kernel/swsusp_asm64.S

@@ -177,6 +177,17 @@ restart_entry:
 	lhi	%r1,1
 	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE
 	sam64
+#ifdef CONFIG_SMP
+	larl	%r1,smp_cpu_mt_shift
+	icm	%r1,15,0(%r1)
+	jz	smt_done
+	llgfr	%r1,%r1
+smt_loop:
+	sigp	%r1,%r0,SIGP_SET_MULTI_THREADING
+	brc	8,smt_done			/* accepted */
+	brc	2,smt_loop			/* busy, try again */
+smt_done:
+#endif
 	larl	%r1,.Lnew_pgm_check_psw
 	lpswe	0(%r1)
 pgm_check_entry:

+ 12 - 0
arch/sparc/include/asm/hypervisor.h

@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
 				   unsigned long reg_val);
 #endif
 
+
+#define HV_FAST_M7_GET_PERFREG	0x43
+#define HV_FAST_M7_SET_PERFREG	0x44
+
+#ifndef	__ASSEMBLY__
+unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
+				      unsigned long *reg_val);
+unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
+				      unsigned long reg_val);
+#endif
+
 /* Function numbers for HV_CORE_TRAP.  */
 #define HV_CORE_SET_VER			0x00
 #define HV_CORE_PUTCHAR			0x01
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
 #define HV_GRP_SDIO			0x0108
 #define HV_GRP_SDIO_ERR			0x0109
 #define HV_GRP_REBOOT_DATA		0x0110
+#define HV_GRP_M7_PERF			0x0114
 #define HV_GRP_NIAG_PERF		0x0200
 #define HV_GRP_FIRE_PERF		0x0201
 #define HV_GRP_N2_CPU			0x0202

+ 1 - 0
arch/sparc/kernel/hvapi.c

@@ -48,6 +48,7 @@ static struct api_info api_table[] = {
 	{ .group = HV_GRP_VT_CPU,				},
 	{ .group = HV_GRP_T5_CPU,				},
 	{ .group = HV_GRP_DIAG,		.flags = FLAG_PRE_API	},
+	{ .group = HV_GRP_M7_PERF,				},
 };
 
 static DEFINE_SPINLOCK(hvapi_lock);

+ 16 - 0
arch/sparc/kernel/hvcalls.S

@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
 	retl
 	 nop
 ENDPROC(sun4v_t5_set_perfreg)
+
+ENTRY(sun4v_m7_get_perfreg)
+	mov	%o1, %o4
+	mov	HV_FAST_M7_GET_PERFREG, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o4]
+	retl
+	nop
+ENDPROC(sun4v_m7_get_perfreg)
+
+ENTRY(sun4v_m7_set_perfreg)
+	mov	HV_FAST_M7_SET_PERFREG, %o5
+	ta	HV_FAST_TRAP
+	retl
+	nop
+ENDPROC(sun4v_m7_set_perfreg)

+ 33 - 0
arch/sparc/kernel/pcr.c

@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
 	.pcr_nmi_disable	= PCR_N4_PICNPT,
 };
 
+static u64 m7_pcr_read(unsigned long reg_num)
+{
+	unsigned long val;
+
+	(void) sun4v_m7_get_perfreg(reg_num, &val);
+
+	return val;
+}
+
+static void m7_pcr_write(unsigned long reg_num, u64 val)
+{
+	(void) sun4v_m7_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops m7_pcr_ops = {
+	.read_pcr		= m7_pcr_read,
+	.write_pcr		= m7_pcr_write,
+	.read_pic		= n4_pic_read,
+	.write_pic		= n4_pic_write,
+	.nmi_picl_value		= n4_picl_value,
+	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
+				   PCR_N4_UTRACE | PCR_N4_TOE |
+				   (26 << PCR_N4_SL_SHIFT)),
+	.pcr_nmi_disable	= PCR_N4_PICNPT,
+};
 
 static unsigned long perf_hsvc_group;
 static unsigned long perf_hsvc_major;
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
 			perf_hsvc_group = HV_GRP_T5_CPU;
 			break;
 
+		case SUN4V_CHIP_SPARC_M7:
+			perf_hsvc_group = HV_GRP_M7_PERF;
+			break;
+
 		default:
 			return -ENODEV;
 		}
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
 		pcr_ops = &n5_pcr_ops;
 		break;
 
+	case SUN4V_CHIP_SPARC_M7:
+		pcr_ops = &m7_pcr_ops;
+		break;
+
 	default:
 		ret = -ENODEV;
 		break;

+ 43 - 12
arch/sparc/kernel/perf_event.c

@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
 	.num_pic_regs	= 4,
 };
 
+static void sparc_m7_write_pmc(int idx, u64 val)
+{
+	u64 pcr;
+
+	pcr = pcr_ops->read_pcr(idx);
+	/* ensure ov and ntc are reset */
+	pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
+
+	pcr_ops->write_pic(idx, val & 0xffffffff);
+
+	pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu sparc_m7_pmu = {
+	.event_map	= niagara4_event_map,
+	.cache_map	= &niagara4_cache_map,
+	.max_events	= ARRAY_SIZE(niagara4_perfmon_event_map),
+	.read_pmc	= sparc_vt_read_pmc,
+	.write_pmc	= sparc_m7_write_pmc,
+	.upper_shift	= 5,
+	.lower_shift	= 5,
+	.event_mask	= 0x7ff,
+	.user_bit	= PCR_N4_UTRACE,
+	.priv_bit	= PCR_N4_STRACE,
+
+	/* We explicitly don't support hypervisor tracing. */
+	.hv_bit		= 0,
+
+	.irq_bit	= PCR_N4_TOE,
+	.upper_nop	= 0,
+	.lower_nop	= 0,
+	.flags		= 0,
+	.max_hw_events	= 4,
+	.num_pcrs	= 4,
+	.num_pic_regs	= 4,
+};
 static const struct sparc_pmu *sparc_pmu __read_mostly;
 
 static u64 event_encoding(u64 event_id, int idx)
@@ -960,6 +996,8 @@ out:
 	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
 }
 
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
 /* On this PMU each PIC has it's own PCR control register.  */
 static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 {
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 		struct perf_event *cp = cpuc->event[i];
 		struct hw_perf_event *hwc = &cp->hw;
 		int idx = hwc->idx;
-		u64 enc;
 
 		if (cpuc->current_idx[i] != PIC_NO_INDEX)
 			continue;
 
-		sparc_perf_event_set_period(cp, hwc, idx);
 		cpuc->current_idx[i] = idx;
 
-		enc = perf_event_get_enc(cpuc->events[i]);
-		cpuc->pcr[idx] &= ~mask_for_index(idx);
-		if (hwc->state & PERF_HES_STOPPED)
-			cpuc->pcr[idx] |= nop_for_index(idx);
-		else
-			cpuc->pcr[idx] |= event_encoding(enc, idx);
+		sparc_pmu_start(cp, PERF_EF_RELOAD);
 	}
 out:
 	for (i = 0; i < cpuc->n_events; i++) {
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 	int i;
 
 	local_irq_save(flags);
-	perf_pmu_disable(event->pmu);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 		}
 	}
 
-	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 }
 
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
 	if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
 
 	ret = 0;
 out:
-	perf_pmu_enable(event->pmu);
 	local_irq_restore(flags);
 	return ret;
 }
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
 		sparc_pmu = &niagara4_pmu;
 		return true;
 	}
+	if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+		sparc_pmu = &sparc_m7_pmu;
+		return true;
+	}
 	return false;
 }
 

+ 4 - 0
arch/sparc/kernel/process_64.c

@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
 			       gp->tpc, gp->o7, gp->i7, gp->rpc);
 		}
+
+		touch_nmi_watchdog();
 	}
 
 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
 		       (cpu == this_cpu ? '*' : ' '), cpu,
 		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
 		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+
+		touch_nmi_watchdog();
 	}
 
 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

+ 32 - 3
arch/sparc/lib/memmove.S

@@ -8,9 +8,11 @@
 
 	.text
 ENTRY(memmove) /* o0=dst o1=src o2=len */
-	mov		%o0, %g1
+	brz,pn		%o2, 99f
+	 mov		%o0, %g1
+
 	cmp		%o0, %o1
-	bleu,pt		%xcc, memcpy
+	bleu,pt		%xcc, 2f
 	 add		%o1, %o2, %g7
 	cmp		%g7, %o0
 	bleu,pt		%xcc, memcpy
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
 	stb		%g7, [%o0]
 	bne,pt		%icc, 1b
 	 sub		%o0, 1, %o0
-
+99:
 	retl
 	 mov		%g1, %o0
+
+	/* We can't just call memcpy for these memmove cases.  On some
+	 * chips the memcpy uses cache initializing stores and when dst
+	 * and src are close enough, those can clobber the source data
+	 * before we've loaded it in.
+	 */
+2:	or		%o0, %o1, %g7
+	or		%o2, %g7, %g7
+	andcc		%g7, 0x7, %g0
+	bne,pn		%xcc, 4f
+	 nop
+
+3:	ldx		[%o1], %g7
+	add		%o1, 8, %o1
+	subcc		%o2, 8, %o2
+	add		%o0, 8, %o0
+	bne,pt		%icc, 3b
+	 stx		%g7, [%o0 - 0x8]
+	ba,a,pt		%xcc, 99b
+
+4:	ldub		[%o1], %g7
+	add		%o1, 1, %o1
+	subcc		%o2, 1, %o2
+	add		%o0, 1, %o0
+	bne,pt		%icc, 4b
+	 stb		%g7, [%o0 - 0x1]
+	ba,a,pt		%xcc, 99b
 ENDPROC(memmove)
 ENDPROC(memmove)
+ 5 - 5
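The comment in the new code names the real hazard: an optimized memcpy may use cache-initializing block stores, which can zero a destination cache line before the overlapping source bytes in it have been loaded, so overlap forces an explicit copy direction. The rule in C, as a sketch rather than the kernel routine:

	#include <stddef.h>

	void *overlap_safe_copy(void *dst, const void *src, size_t n)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		if (d <= s || d >= s + n) {
			while (n--)		/* dst below src (or disjoint): forward */
				*d++ = *s++;
		} else {
			d += n;			/* dst inside [src, src+n): backward */
			s += n;
			while (n--)
				*--d = *--s;
		}
		return dst;
	}
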
arch/x86/kernel/cpu/perf_event_intel.c

@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
-	INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
 	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
-	INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
 	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
-	INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 	EVENT_CONSTRAINT_END
 };
 
@@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
 	if (c)
 		return c;
 
-	c = intel_pebs_constraints(event);
+	c = intel_shared_regs_constraints(cpuc, event);
 	if (c)
 		return c;
 
-	c = intel_shared_regs_constraints(cpuc, event);
+	c = intel_pebs_constraints(event);
 	if (c)
 		return c;
 

+ 29 - 5
arch/x86/kernel/entry_64.S

@@ -364,12 +364,21 @@ system_call_fastpath:
  * Has incomplete stack frame and undefined top of stack.
  */
 ret_from_sys_call:
-	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-	jnz int_ret_from_sys_call_fixup	/* Go the the slow path */
-
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
+
+	/*
+	 * We must check ti flags with interrupts (or at least preemption)
+	 * off because we must *never* return to userspace without
+	 * processing exit work that is enqueued if we're preempted here.
+	 * In particular, returning to userspace with any of the one-shot
+	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+	 * very bad.
+	 */
+	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	jnz int_ret_from_sys_call_fixup	/* Go the the slow path */
+
 	CFI_REMEMBER_STATE
 	/*
 	 * sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
 
 int_ret_from_sys_call_fixup:
 	FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
-	jmp int_ret_from_sys_call
+	jmp int_ret_from_sys_call_irqs_off
 
 	/* Do syscall tracing */
 tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
 GLOBAL(int_ret_from_sys_call)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	mask to check */
 GLOBAL(int_with_check)
@@ -789,7 +799,21 @@ retint_swapgs:		/* return to user-space */
 	cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp)	/* R11 == RFLAGS */
 	jne opportunistic_sysret_failed
 
-	testq $X86_EFLAGS_RF,%r11		/* sysret can't restore RF */
+	/*
+	 * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
+	 * restoring TF results in a trap from userspace immediately after
+	 * SYSRET.  This would cause an infinite loop whenever #DB happens
+	 * with register state that satisfies the opportunistic SYSRET
+	 * conditions.  For example, single-stepping this user code:
+	 *
+	 *           movq $stuck_here,%rcx
+	 *           pushfq
+	 *           popq %r11
+	 *   stuck_here:
+	 *
+	 * would never get past 'stuck_here'.
+	 */
+	testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
 	jnz opportunistic_sysret_failed
 
 	/* nothing to check for RSP */

+ 1 - 1
arch/x86/kernel/kgdb.c

@@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
 	{ "bx", 8, offsetof(struct pt_regs, bx) },
 	{ "cx", 8, offsetof(struct pt_regs, cx) },
 	{ "dx", 8, offsetof(struct pt_regs, dx) },
-	{ "si", 8, offsetof(struct pt_regs, dx) },
+	{ "si", 8, offsetof(struct pt_regs, si) },
 	{ "di", 8, offsetof(struct pt_regs, di) },
 	{ "bp", 8, offsetof(struct pt_regs, bp) },
 	{ "sp", 8, offsetof(struct pt_regs, sp) },

+ 10 - 0
arch/x86/kernel/reboot.c

@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 		},
 	},
 
+	/* ASRock */
+	{	/* Handle problems with rebooting on ASRock Q1900DC-ITX */
+		.callback = set_pci_reboot,
+		.ident = "ASRock Q1900DC-ITX",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
+			DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+		},
+	},
+
 	/* ASUS */
 	{	/* Handle problems with rebooting on ASUS P4S800 */
 		.callback = set_bios_reboot,

+ 3 - 1
arch/x86/kvm/ioapic.c

@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
 	int i;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
 		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
 		spin_lock(&ioapic->lock);
 
-		if (trigger_mode != IOAPIC_LEVEL_TRIG)
+		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+		    kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
 			continue;
 
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);

+ 1 - 2
arch/x86/kvm/lapic.c

@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 
 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
 {
-	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-	    kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+	if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
 		int trigger_mode;
 		if (apic_test_vector(vector, apic->regs + APIC_TMR))
 			trigger_mode = IOAPIC_LEVEL_TRIG;

+ 5 - 2
arch/x86/kvm/vmx.c

@@ -2479,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	if (enable_ept) {
 		/* nested EPT: emulate EPT also to L1 */
 		vmx->nested.nested_vmx_secondary_ctls_high |=
-			SECONDARY_EXEC_ENABLE_EPT |
-			SECONDARY_EXEC_UNRESTRICTED_GUEST;
+			SECONDARY_EXEC_ENABLE_EPT;
 		vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
 			 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
 			 VMX_EPT_INVEPT_BIT;
@@ -2494,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	} else
 		vmx->nested.nested_vmx_ept_caps = 0;
 
+	if (enable_unrestricted_guest)
+		vmx->nested.nested_vmx_secondary_ctls_high |=
+			SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
 	/* miscellaneous data */
 	rdmsr(MSR_IA32_VMX_MISC,
 		vmx->nested.nested_vmx_misc_low,

+ 9 - 1
arch/x86/xen/p2m.c

@@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#else
+#define P2M_LIMIT 0
+#endif
+
 static DEFINE_SPINLOCK(p2m_update_lock);
 
 static unsigned long *p2m_mid_missing_mfn;
@@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
 void __init xen_vmalloc_p2m_tree(void)
 {
 	static struct vm_struct vm;
+	unsigned long p2m_limit;
 
+	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
 	vm.flags = VM_ALLOC;
-	vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
+	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
 			PMD_SIZE * PMDS_PER_MID_PAGE);
 	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
 	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

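P2M_LIMIT is in gigabytes, so the sizing code converts it to p2m entries (one per page) before taking the max with xen_max_p2m_pfn; the (phys_addr_t) cast keeps the multiply from overflowing a 32-bit unsigned long. Worked through with example numbers:

	/* P2M_LIMIT = 4 GiB of ballooning headroom, PAGE_SIZE = 4096:
	 * 4 * 1024*1024*1024 / 4096 = 1048576 p2m entries, i.e. about
	 * 8 MiB of table on a 64-bit build (8 bytes per entry). */
	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
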
+ 1 - 1
block/blk-merge.c

@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
 		struct bio_vec *bprev;
 
-		bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
 		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
 			return false;
 	}

+ 4 - 2
block/blk-mq-tag.c

@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
 		/*
 		 * We're out of tags on this hardware queue, kick any
 		 * pending IO submits before going to sleep waiting for
-		 * some to complete.
+		 * some to complete. Note that hctx can be NULL here for
+		 * reserved tag allocation.
 		 */
-		blk_mq_run_hw_queue(hctx, false);
+		if (hctx)
+			blk_mq_run_hw_queue(hctx, false);
 
 		/*
 		 * Retry tag allocation after running the hardware queue,

+ 3 - 3
block/blk-mq.c

@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	 */
 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_map;
+		goto err_mq_usage;
 
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
 	blk_queue_rq_timeout(q, 30000);
@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_hw;
+		goto err_mq_usage;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	return q;
 
-err_hw:
+err_mq_usage:
 	blk_cleanup_queue(q);
 err_hctxs:
 	kfree(map);

+ 3 - 3
block/blk-settings.c

@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 				     b->physical_block_size);
 
 	t->io_min = max(t->io_min, b->io_min);
-	t->io_opt = lcm(t->io_opt, b->io_opt);
+	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
 	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		    b->raid_partial_stripes_expensive);
 
 	/* Find lowest common alignment_offset */
-	t->alignment_offset = lcm(t->alignment_offset, alignment)
+	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
 		% max(t->physical_block_size, t->io_min);
 
 	/* Verify that new alignment_offset is on a logical block boundary */
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 						      b->max_discard_sectors);
 		t->discard_granularity = max(t->discard_granularity,
 					     b->discard_granularity);
-		t->discard_alignment = lcm(t->discard_alignment, alignment) %
+		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
 			t->discard_granularity;
 	}
 

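The switch matters because lcm() treats 0 as annihilating: stacking a device with an unset hint (io_opt = 0) on one with a real value used to zero out the combined hint. lcm_not_zero() falls back to the nonzero operand instead. The difference, assuming the include/linux/lcm.h semantics:

	lcm(0, 512);		/* == 0: the unset hint wipes out the real one */
	lcm_not_zero(0, 512);	/* == 512: keeps the nonzero value */
	lcm_not_zero(512, 384);	/* == 1536: ordinary lcm when both are set */
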
+ 15 - 4
drivers/ata/libata-core.c

@@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
 
 	/* devices that don't properly handle queued TRIM commands */
-	{ "Micron_M[56]*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Micron_M5[15]0*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Samsung SSD 850 PRO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
-	{ "Crucial_CT*SSD*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
 
 	/*
 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	 */
 	{ "INTEL*SSDSC2MH*",		NULL,	0, },
 
+	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
 		return NULL;
 
 	/* libsas case */
-	if (!ap->scsi_host) {
+	if (ap->flags & ATA_FLAG_SAS_HOST) {
 		tag = ata_sas_allocate_tag(ap);
 		if (tag < 0)
 			return NULL;
@@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 	tag = qc->tag;
 	if (likely(ata_tag_valid(tag))) {
 		qc->tag = ATA_TAG_POISON;
-		if (!ap->scsi_host)
+		if (ap->flags & ATA_FLAG_SAS_HOST)
 			ata_sas_free_tag(tag, ap);
 	}
 }

+ 8 - 0
drivers/base/regmap/internal.h

@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
 
+static inline const char *regmap_name(const struct regmap *map)
+{
+	if (map->dev)
+		return dev_name(map->dev);
+
+	return map->name;
+}
+
 #endif

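regmap_name() gives diagnostics a usable identifier even for maps created without a struct device (syscon-style or early-boot maps), falling back to the map's own name; it pairs with the trace changes below, which now pass the struct regmap * itself rather than map->dev. A usage sketch, not taken from the patch:

	pr_debug("regmap %s: syncing cache\n", regmap_name(map));
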
+ 8 - 8
drivers/base/regmap/regcache.c

@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
 		ret = map->cache_ops->read(map, reg, value);
 
 		if (ret == 0)
-			trace_regmap_reg_read_cache(map->dev, reg, *value);
+			trace_regmap_reg_read_cache(map, reg, *value);
 
 		return ret;
 	}
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
 	dev_dbg(map->dev, "Syncing %s cache\n",
 		map->cache_ops->name);
 	name = map->cache_ops->name;
-	trace_regcache_sync(map->dev, name, "start");
+	trace_regcache_sync(map, name, "start");
 
 	if (!map->cache_dirty)
 		goto out;
@@ -346,7 +346,7 @@ out:
 
 	regmap_async_complete(map);
 
-	trace_regcache_sync(map->dev, name, "stop");
+	trace_regcache_sync(map, name, "stop");
 
 	return ret;
 }
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 	name = map->cache_ops->name;
 	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
 
-	trace_regcache_sync(map->dev, name, "start region");
+	trace_regcache_sync(map, name, "start region");
 
 	if (!map->cache_dirty)
 		goto out;
@@ -401,7 +401,7 @@ out:
 
 	regmap_async_complete(map);
 
-	trace_regcache_sync(map->dev, name, "stop region");
+	trace_regcache_sync(map, name, "stop region");
 
 	return ret;
 }
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
 
 	map->lock(map->lock_arg);
 
-	trace_regcache_drop_region(map->dev, min, max);
+	trace_regcache_drop_region(map, min, max);
 
 	ret = map->cache_ops->drop(map, min, max);
 
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
 	map->lock(map->lock_arg);
 	WARN_ON(map->cache_bypass && enable);
 	map->cache_only = enable;
-	trace_regmap_cache_only(map->dev, enable);
+	trace_regmap_cache_only(map, enable);
 	map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
 	map->lock(map->lock_arg);
 	WARN_ON(map->cache_only && enable);
 	map->cache_bypass = enable;
-	trace_regmap_cache_bypass(map->dev, enable);
+	trace_regmap_cache_bypass(map, enable);
 	map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);

+ 14 - 18
drivers/base/regmap/regmap.c

@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	if (map->async && map->bus->async_write) {
 		struct regmap_async *async;
 
-		trace_regmap_async_write_start(map->dev, reg, val_len);
+		trace_regmap_async_write_start(map, reg, val_len);
 
 		spin_lock_irqsave(&map->async_lock, flags);
 		async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		return ret;
 	}
 
-	trace_regmap_hw_write_start(map->dev, reg,
-				    val_len / map->format.val_bytes);
+	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
 
 	/* If we're doing a single register write we can probably just
 	 * send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		kfree(buf);
 	}
 
-	trace_regmap_hw_write_done(map->dev, reg,
-				   val_len / map->format.val_bytes);
+	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
 
 	return ret;
 }
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 
 	map->format.format_write(map, reg, val);
 
-	trace_regmap_hw_write_start(map->dev, reg, 1);
+	trace_regmap_hw_write_start(map, reg, 1);
 
 	ret = map->bus->write(map->bus_context, map->work_buf,
 			      map->format.buf_size);
 
-	trace_regmap_hw_write_done(map->dev, reg, 1);
+	trace_regmap_hw_write_done(map, reg, 1);
 
 	return ret;
 }
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
 		dev_info(map->dev, "%x <= %x\n", reg, val);
 #endif
 
-	trace_regmap_reg_write(map->dev, reg, val);
+	trace_regmap_reg_write(map, reg, val);
 
 	return map->reg_write(context, reg, val);
 }
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 	for (i = 0; i < num_regs; i++) {
 		int reg = regs[i].reg;
 		int val = regs[i].def;
-		trace_regmap_hw_write_start(map->dev, reg, 1);
+		trace_regmap_hw_write_start(map, reg, 1);
 		map->format.format_reg(u8, reg, map->reg_shift);
 		u8 += reg_bytes + pad_bytes;
 		map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 
 	for (i = 0; i < num_regs; i++) {
 		int reg = regs[i].reg;
-		trace_regmap_hw_write_done(map->dev, reg, 1);
+		trace_regmap_hw_write_done(map, reg, 1);
 	}
 	return ret;
 }
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	 */
 	u8[0] |= map->read_flag_mask;
 
-	trace_regmap_hw_read_start(map->dev, reg,
-				   val_len / map->format.val_bytes);
+	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
 
 	ret = map->bus->read(map->bus_context, map->work_buf,
 			     map->format.reg_bytes + map->format.pad_bytes,
 			     val, val_len);
 
-	trace_regmap_hw_read_done(map->dev, reg,
-				  val_len / map->format.val_bytes);
+	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
 
 	return ret;
 }
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 			dev_info(map->dev, "%x => %x\n", reg, *val);
 #endif
 
-		trace_regmap_reg_read(map->dev, reg, *val);
+		trace_regmap_reg_read(map, reg, *val);
 
 		if (!map->cache_bypass)
 			regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	struct regmap *map = async->map;
 	bool wake;
 
-	trace_regmap_async_io_complete(map->dev);
+	trace_regmap_async_io_complete(map);
 
 	spin_lock(&map->async_lock);
 	list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
 	if (!map->bus || !map->bus->async_write)
 		return 0;
 
-	trace_regmap_async_complete_start(map->dev);
+	trace_regmap_async_complete_start(map);
 
 	wait_event(map->async_waitq, regmap_async_is_done(map));
 
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
 	map->async_ret = 0;
 	spin_unlock_irqrestore(&map->async_lock, flags);
 
-	trace_regmap_async_complete_done(map->dev);
+	trace_regmap_async_complete_done(map);
 
 	return ret;
 }

+ 4 - 4
drivers/block/nbd.c

@@ -803,10 +803,6 @@ static int __init nbd_init(void)
 		return -EINVAL;
 	}
 
-	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
-	if (!nbd_dev)
-		return -ENOMEM;
-
 	part_shift = 0;
 	if (max_part > 0) {
 		part_shift = fls(max_part);
@@ -828,6 +824,10 @@ static int __init nbd_init(void)
 	if (nbds_max > 1UL << (MINORBITS - part_shift))
 		return -EINVAL;
 
+	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+	if (!nbd_dev)
+		return -ENOMEM;
+
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = alloc_disk(1 << part_shift);
 		if (!disk)

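The reorder is a leak fix: nbd_init() could still fail its max_part/nbds_max checks after allocating nbd_dev and return -EINVAL with the allocation live. Doing every parameter check before the first allocation means the only later failure is the allocation itself, which needs no cleanup. The shape of the fix, with hypothetical names:

	static int __init example_init(void)
	{
		if (!params_valid())			/* all -EINVAL exits first... */
			return -EINVAL;

		state = kcalloc(n, sizeof(*state), GFP_KERNEL);
		if (!state)				/* ...so this early return */
			return -ENOMEM;			/* leaks nothing */

		return 0;
	}
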
+ 1 - 0
drivers/block/nvme-core.c

@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	get_device(dev->device);
 
+	INIT_LIST_HEAD(&dev->node);
 	INIT_WORK(&dev->probe_work, nvme_async_probe);
 	schedule_work(&dev->probe_work);
 	return 0;

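INIT_LIST_HEAD makes dev->node a valid empty list before the asynchronous probe runs; calling list_del() on a zeroed, never-initialized list_head dereferences NULL prev/next pointers. The invariant in two lines:

	INIT_LIST_HEAD(&dev->node);	/* node.prev == node.next == &node */
	list_del_init(&dev->node);	/* now safe even if never list_add()ed */
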
+ 3 - 0
drivers/clocksource/Kconfig

@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
 config SH_TIMER_CMT
 	bool "Renesas CMT timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_CMT
 	help
 	  This enables build of a clocksource and clockevent driver for
@@ -201,6 +202,7 @@ config SH_TIMER_CMT
 config SH_TIMER_MTU2
 	bool "Renesas MTU2 timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_MTU2
 	help
 	  This enables build of a clockevent driver for the Multi-Function
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
 config SH_TIMER_TMU
 	bool "Renesas TMU timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_TMU
 	help
 	  This enables build of a clocksource and clockevent driver for

+ 0 - 7
drivers/clocksource/timer-sun5i.c

@@ -17,7 +17,6 @@
 #include <linux/irq.h>
 #include <linux/irqreturn.h>
 #include <linux/reset.h>
-#include <linux/sched_clock.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
 	.dev_id = &sun5i_clockevent,
 };
 
-static u64 sun5i_timer_sched_read(void)
-{
-	return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
-}
-
 static void __init sun5i_timer_init(struct device_node *node)
 {
 	struct reset_control *rstc;
@@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node)
 	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
 	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
 	       timer_base + TIMER_CTL_REG(1));
 	       timer_base + TIMER_CTL_REG(1));
 
 
-	sched_clock_register(sun5i_timer_sched_read, 32, rate);
 	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
 	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
 			      rate, 340, 32, clocksource_mmio_readl_down);
 			      rate, 340, 32, clocksource_mmio_readl_down);
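
Aside: the timer behind this clocksource counts down, which is why reads go through clocksource_mmio_readl_down and why the removed sched_clock callback complemented the register value. A runnable sketch of the complement trick; fake_cntval simulates the hardware counter:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_cntval = 0xfffffff0;	/* down-counter, decremented by "hardware" */

static uint32_t read_down_counter_as_up(void)
{
	return ~fake_cntval;	/* bitwise complement turns countdown into count-up */
}

int main(void)
{
	uint32_t t0 = read_down_counter_as_up();
	fake_cntval -= 100;			/* the counter ticks down */
	uint32_t t1 = read_down_counter_as_up();
	printf("elapsed ticks: %u\n", t1 - t0);	/* prints 100 */
	return 0;
}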
 

+ 1 - 0
drivers/dma/bcm2835-dma.c

@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
+		bcm2835_dma_desc_free(&c->desc->vd);
 		c->desc = NULL;
 		bcm2835_dma_abort(c->chan_base);
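
Aside: this hunk and the edma, moxart-dma and omap-dma hunks below fix the same leak. A descriptor that is currently executing was removed from the virt-dma lists when it was issued, so terminate_all() has to free it by hand. A hedged sketch of the pattern; fake_chan, fake_desc and the helpers are illustrative stand-ins, not the drivers' types:

#include <stdio.h>

struct fake_desc { int id; };

struct fake_chan {
	struct fake_desc *desc;	/* in-flight descriptor, off all lists */
};

static void fake_desc_free(struct fake_desc *d)
{
	(void)d;	/* kfree(d) in a real driver */
}

static void fake_terminate_all(struct fake_chan *c)
{
	if (c->desc) {
		fake_desc_free(c->desc);	/* without this, the running
						 * descriptor leaks on terminate */
		c->desc = 0;
	}
	/* ...then stop the hardware and free everything still queued */
}

int main(void)
{
	struct fake_desc d = { 1 };
	struct fake_chan c = { &d };

	fake_terminate_all(&c);
	printf("terminated, desc=%p\n", (void *)c.desc);
	return 0;
}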
 

+ 7 - 0
drivers/dma/dma-jz4740.c

@@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
 	kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
 }
 
+#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
 static int jz4740_dma_probe(struct platform_device *pdev)
 {
 	struct jz4740_dmaengine_chan *chan;
@@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 	dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
 	dd->device_config = jz4740_dma_slave_config;
 	dd->device_terminate_all = jz4740_dma_terminate_all;
+	dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
+	dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
+	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	dd->dev = &pdev->dev;
 	INIT_LIST_HEAD(&dd->channels);
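
Aside: the four dd-> assignments above advertise the controller's slave capabilities so the dmaengine core can answer capability queries instead of each client guessing. A runnable sketch of how such a bus-width mask is built and queried; the enum mirrors dma_slave_buswidth's values but is redefined here for illustration:

#include <stdio.h>

#define BIT(n) (1U << (n))

enum slave_buswidth {		/* values match enum dma_slave_buswidth */
	SLAVE_BUSWIDTH_1_BYTE  = 1,
	SLAVE_BUSWIDTH_2_BYTES = 2,
	SLAVE_BUSWIDTH_4_BYTES = 4,
};

#define DMA_BUSWIDTHS (BIT(SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(SLAVE_BUSWIDTH_2_BYTES) | BIT(SLAVE_BUSWIDTH_4_BYTES))

int main(void)
{
	/* a client asking "can this controller do 4-byte beats?" */
	if (DMA_BUSWIDTHS & BIT(SLAVE_BUSWIDTH_4_BYTES))
		printf("4-byte bus width supported (mask 0x%x)\n", DMA_BUSWIDTHS);
	return 0;
}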
 

+ 7 - 0
drivers/dma/edma.c

@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
 	 */
 	if (echan->edesc) {
 		int cyclic = echan->edesc->cyclic;
+
+		/*
+		 * free the running request descriptor
+		 * since it is not in any of the vdesc lists
+		 */
+		edma_desc_free(&echan->edesc->vdesc);
+
 		echan->edesc = NULL;
 		edma_stop(echan->ch_num);
 		/* Move the cyclic channel back to default queue */

+ 3 - 1
drivers/dma/moxart-dma.c

@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
 
 	spin_lock_irqsave(&ch->vc.lock, flags);
 
-	if (ch->desc)
+	if (ch->desc) {
+		moxart_dma_desc_free(&ch->desc->vd);
 		ch->desc = NULL;
+	}
 
 	ctrl = readl(ch->base + REG_OFF_CTRL);
 	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);

+ 1 - 0
drivers/dma/omap-dma.c

@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
+		omap_dma_desc_free(&c->desc->vd);
 		c->desc = NULL;
 		/* Avoid stopping the dma twice */
 		if (!c->paused)

+ 7 - 15
drivers/firmware/dmi_scan.c

@@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
 	int i = 0;
 
 	/*
-	 *	Stop when we see all the items the table claimed to have
-	 *	OR we run off the end of the table (also happens)
+	 * Stop when we have seen all the items the table claimed to have
+	 * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
+	 * off the end of the table (should never happen but sometimes does
+	 * on bogus implementations.)
 	 */
-	while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
+	while ((!num || i < num) &&
+	       (data - buf + sizeof(struct dmi_header)) <= len) {
 		const struct dmi_header *dm = (const struct dmi_header *)data;
 
 		/*
@@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf)
 	if (memcmp(buf, "_SM3_", 5) == 0 &&
 	    buf[6] < 32 && dmi_checksum(buf, buf[6])) {
 		dmi_ver = get_unaligned_be16(buf + 7);
+		dmi_num = 0;			/* No longer specified */
 		dmi_len = get_unaligned_le32(buf + 12);
 		dmi_base = get_unaligned_le64(buf + 16);
 
-		/*
-		 * The 64-bit SMBIOS 3.0 entry point no longer has a field
-		 * containing the number of structures present in the table.
-		 * Instead, it defines the table size as a maximum size, and
-		 * relies on the end-of-table structure type (#127) to be used
-		 * to signal the end of the table.
-		 * So let's define dmi_num as an upper bound as well: each
-		 * structure has a 4 byte header, so dmi_len / 4 is an upper
-		 * bound for the number of structures in the table.
-		 */
-		dmi_num = dmi_len / 4;
-
 		if (dmi_walk_early(dmi_decode) == 0) {
 			pr_info("SMBIOS %d.%d present.\n",
 				dmi_ver >> 8, dmi_ver & 0xFF);
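
Worked example of the rewritten loop: a 64-bit SMBIOS 3.0 entry point carries no structure count, so dmi_num becomes 0 and termination relies on the table length and the type-127 end-of-table marker. A runnable user-space sketch of that walk; the 4-byte header layout follows the SMBIOS spec, while dmi_table_sketch and the sample table bytes are made up:

#include <stdio.h>
#include <stdint.h>

struct dmi_header { uint8_t type, length; uint16_t handle; };

static void dmi_table_sketch(uint8_t *buf, uint32_t len, int num)
{
	uint8_t *data = buf;
	int i = 0;

	while ((!num || i < num) &&
	       (uint32_t)(data - buf + sizeof(struct dmi_header)) <= len) {
		const struct dmi_header *dm = (const struct dmi_header *)data;

		printf("structure %d: type %u\n", i, dm->type);
		if (dm->type == 127)	/* end-of-table marker */
			break;
		/* skip formatted area, then the double-NUL string area */
		data += dm->length;
		while ((uint32_t)(data - buf) + 1 < len && (data[0] || data[1]))
			data++;
		data += 2;
		i++;
	}
}

int main(void)
{
	/* type 0 structure (4-byte header, empty string area), then type 127 */
	uint8_t table[] = { 0, 4, 0, 0, 0, 0,  127, 4, 1, 0, 0, 0 };

	dmi_table_sketch(table, sizeof(table), 0);	/* num == 0: SMBIOS 3.0 */
	return 0;
}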

+ 1 - 1
drivers/gpio/gpio-mpc8xxx.c

@@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
 	.xlate	= irq_domain_xlate_twocell,
 };
 
-static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+static struct of_device_id mpc8xxx_gpio_ids[] = {
 	{ .compatible = "fsl,mpc8349-gpio", },
 	{ .compatible = "fsl,mpc8572-gpio", },
 	{ .compatible = "fsl,mpc8610-gpio", },

+ 1 - 1
drivers/gpio/gpio-syscon.c

@@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
 		ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
 						 &priv->dir_reg_offset);
 		if (ret)
-			dev_err(dev, "can't read the dir register offset!\n");
+			dev_dbg(dev, "can't read the dir register offset!\n");
 
 		priv->dir_reg_offset <<= 3;
 	}

+ 10 - 0
drivers/gpio/gpiolib-acpi.c

@@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 	if (!handler)
 		return AE_BAD_PARAMETER;
 
+	pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+	if (pin < 0)
+		return AE_BAD_PARAMETER;
+
 	desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event");
 	if (IS_ERR(desc)) {
 		dev_err(chip->dev, "Failed to request GPIO\n");
@@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
 		struct gpio_desc *desc;
 		bool found;
 
+		pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+		if (pin < 0) {
+			status = AE_BAD_PARAMETER;
+			goto out;
+		}
+
 		mutex_lock(&achip->conn_lock);
 
 		found = false;
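
Aside: both hunks above run the ACPI pin number through a translation to a chip-relative GPIO offset and treat a negative result as "not this chip". A hedged sketch of such a translation; the linear mapping and both constants are assumptions for illustration only:

#include <stdio.h>

#define CHIP_PIN_BASE	32	/* first ACPI pin this chip owns (example) */
#define CHIP_NGPIO	16	/* pins on this chip (example) */

static int pin_to_gpio_offset_sketch(int pin)
{
	if (pin < CHIP_PIN_BASE || pin >= CHIP_PIN_BASE + CHIP_NGPIO)
		return -22;	/* -EINVAL: not ours, caller must bail out */
	return pin - CHIP_PIN_BASE;
}

int main(void)
{
	printf("pin 35 -> offset %d\n", pin_to_gpio_offset_sketch(35));
	printf("pin  5 -> offset %d\n", pin_to_gpio_offset_sketch(5));
	return 0;
}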

+ 1 - 12
drivers/gpu/drm/drm_crtc.c

@@ -525,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
 }
 EXPORT_SYMBOL(drm_framebuffer_reference);
 
-static void drm_framebuffer_free_bug(struct kref *kref)
-{
-	BUG();
-}
-
-static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
-	DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
-	kref_put(&fb->refcount, drm_framebuffer_free_bug);
-}
-
 /**
  * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
  * @fb: fb to unregister
@@ -1320,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
 		return;
 	}
 	/* disconnect the plane from the fb and crtc: */
-	__drm_framebuffer_unreference(plane->old_fb);
+	drm_framebuffer_unreference(plane->old_fb);
 	plane->old_fb = NULL;
 	plane->fb = NULL;
 	plane->crtc = NULL;

+ 1 - 0
drivers/gpu/drm/drm_edid_load.c

@@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
 
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
+	drm_edid_to_eld(connector, edid);
 	kfree(edid);
 
 	return ret;

+ 1 - 0
drivers/gpu/drm/drm_probe_helper.c

@@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 			struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
 
 			count = drm_add_edid_modes(connector, edid);
+			drm_edid_to_eld(connector, edid);
 		} else
 			count = (*connector_funcs->get_modes)(connector);
 	}

+ 5 - 3
drivers/gpu/drm/exynos/exynos_drm_fimd.c

@@ -147,6 +147,7 @@ struct fimd_win_data {
 	unsigned int		ovl_height;
 	unsigned int		fb_width;
 	unsigned int		fb_height;
+	unsigned int		fb_pitch;
 	unsigned int		bpp;
 	unsigned int		pixel_format;
 	dma_addr_t		dma_addr;
@@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
 	win_data->offset_y = plane->crtc_y;
 	win_data->ovl_width = plane->crtc_width;
 	win_data->ovl_height = plane->crtc_height;
+	win_data->fb_pitch = plane->pitch;
 	win_data->fb_width = plane->fb_width;
 	win_data->fb_height = plane->fb_height;
 	win_data->dma_addr = plane->dma_addr[0] + offset;
 	win_data->bpp = plane->bpp;
 	win_data->pixel_format = plane->pixel_format;
-	win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
-				(plane->bpp >> 3);
+	win_data->buf_offsize =
+		plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
 	win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
 
 	DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
@@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
 	writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
 
 	/* buffer end address */
-	size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
+	size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3);
 	val = (unsigned long)(win_data->dma_addr + size);
 	writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
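
Aside: the fimd hunks trade width-derived offsets for pitch-derived ones, which only differ when framebuffer rows carry alignment padding. A small worked example of the new buf_offsize computation; the mode and pitch numbers are made up:

#include <stdio.h>

int main(void)
{
	unsigned int crtc_width = 1366;	/* visible pixels per line */
	unsigned int bpp = 32;		/* bits per pixel */
	unsigned int pitch = 5504;	/* bytes per line: 1366 * 4 = 5464,
					 * padded up to a 64-byte multiple */

	unsigned int line_size = crtc_width * (bpp >> 3);	/* 5464 */
	unsigned int buf_offsize = pitch - line_size;		/* 40, not 0 */

	printf("line_size=%u buf_offsize=%u\n", line_size, buf_offsize);
	return 0;
}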
 

+ 10 - 7
drivers/gpu/drm/exynos/exynos_mixer.c

@@ -55,6 +55,7 @@ struct hdmi_win_data {
 	unsigned int		fb_x;
 	unsigned int		fb_y;
 	unsigned int		fb_width;
+	unsigned int		fb_pitch;
 	unsigned int		fb_height;
 	unsigned int		src_width;
 	unsigned int		src_height;
@@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
 	} else {
 		luma_addr[0] = win_data->dma_addr;
 		chroma_addr[0] = win_data->dma_addr
-			+ (win_data->fb_width * win_data->fb_height);
+			+ (win_data->fb_pitch * win_data->fb_height);
 	}
 
 	if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
 			luma_addr[1] = luma_addr[0] + 0x40;
 			chroma_addr[1] = chroma_addr[0] + 0x40;
 		} else {
-			luma_addr[1] = luma_addr[0] + win_data->fb_width;
-			chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
+			luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
+			chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
 		}
 	} else {
 		ctx->interlace = false;
@@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
 	vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
 
 	/* setting size of input image */
-	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
 		VP_IMG_VSIZE(win_data->fb_height));
 	/* chroma height has to reduced by 2 to avoid chroma distorions */
-	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
 		VP_IMG_VSIZE(win_data->fb_height / 2));
 
 	vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
@@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
 	/* converting dma address base and source offset */
 	dma_addr = win_data->dma_addr
 		+ (win_data->fb_x * win_data->bpp >> 3)
-		+ (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+		+ (win_data->fb_y * win_data->fb_pitch);
 	src_x_offset = 0;
 	src_y_offset = 0;
 
@@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
 		MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
 
 	/* setup geometry */
-	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
+	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
+			win_data->fb_pitch / (win_data->bpp >> 3));
 
 	/* setup display size */
 	if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
@@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
 	win_data->fb_y = plane->fb_y;
 	win_data->fb_width = plane->fb_width;
 	win_data->fb_height = plane->fb_height;
+	win_data->fb_pitch = plane->pitch;
 	win_data->src_width = plane->src_width;
 	win_data->src_height = plane->src_height;
 

+ 21 - 17
drivers/gpu/drm/i915/i915_gem.c

@@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate,
-	 * before we free the context associated with the requests.
+	/* Retire requests first as we use it above for the early return.
+	 * If we retire requests last, we may use a later seqno and so clear
+	 * the requests lists without clearing the active list, leading to
+	 * confusion.
 	 */
-	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&ring->active_list,
-				      struct drm_i915_gem_object,
-				      ring_list);
-
-		if (!i915_gem_request_completed(obj->last_read_req, true))
-			break;
-
-		i915_gem_object_move_to_inactive(obj);
-	}
-
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 		struct intel_ringbuffer *ringbuf;
@@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 		i915_gem_free_request(request);
 	}
 
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate,
+	 * before we free the context associated with the requests.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&ring->active_list,
+				      struct drm_i915_gem_object,
+				      ring_list);
+
+		if (!i915_gem_request_completed(obj->last_read_req, true))
+			break;
+
+		i915_gem_object_move_to_inactive(obj);
+	}
+
 	if (unlikely(ring->trace_irq_req &&
 		     i915_gem_request_completed(ring->trace_irq_req, true))) {
 		ring->irq_put(ring);

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -1487,7 +1487,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	if (i915_needs_cmd_parser(ring)) {
+	if (i915_needs_cmd_parser(ring) && args->batch_len) {
 		batch_obj = i915_gem_execbuffer_parse(ring,
 						      &shadow_exec_entry,
 						      eb,

+ 13 - 5
drivers/gpu/drm/i915/intel_display.c

@@ -2438,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
 	if (!intel_crtc->base.primary->fb)
 		return;
 
-	if (intel_alloc_plane_obj(intel_crtc, plane_config))
+	if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
+		struct drm_plane *primary = intel_crtc->base.primary;
+
+		primary->state->crtc = &intel_crtc->base;
+		primary->crtc = &intel_crtc->base;
+		update_state_fb(primary);
+
 		return;
+	}
 
 	kfree(intel_crtc->base.primary->fb);
 	intel_crtc->base.primary->fb = NULL;
@@ -2462,11 +2469,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
 			continue;
 
 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+			struct drm_plane *primary = intel_crtc->base.primary;
+
 			if (obj->tiling_mode != I915_TILING_NONE)
 				dev_priv->preserve_bios_swizzle = true;
 
 			drm_framebuffer_reference(c->primary->fb);
-			intel_crtc->base.primary->fb = c->primary->fb;
+			primary->fb = c->primary->fb;
+			primary->state->crtc = &intel_crtc->base;
+			primary->crtc = &intel_crtc->base;
 			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 			break;
 		}
@@ -6663,7 +6674,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 		      plane_config->size);
 
 	crtc->base.primary->fb = fb;
-	update_state_fb(crtc->base.primary);
 }
 
 static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7704,7 +7714,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 		      plane_config->size);
 
 	crtc->base.primary->fb = fb;
-	update_state_fb(crtc->base.primary);
 	return;
 
 error:
@@ -7798,7 +7807,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 		      plane_config->size);
 
 	crtc->base.primary->fb = fb;
-	update_state_fb(crtc->base.primary);
 }
 
 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,

+ 2 - 2
drivers/gpu/drm/i915/intel_sprite.c

@@ -1322,7 +1322,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 	drm_modeset_lock_all(dev);
 
 	plane = drm_plane_find(dev, set->plane_id);
-	if (!plane) {
+	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
 		ret = -ENOENT;
 		goto out_unlock;
 	}
@@ -1349,7 +1349,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
 	drm_modeset_lock_all(dev);
 
 	plane = drm_plane_find(dev, get->plane_id);
-	if (!plane) {
+	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
 		ret = -ENOENT;
 		goto out_unlock;
 	}

+ 1 - 0
drivers/gpu/drm/radeon/cikd.h

@@ -2129,6 +2129,7 @@
 #define VCE_UENC_REG_CLOCK_GATING	0x207c0
 #define VCE_SYS_INT_EN			0x21300
 #	define VCE_SYS_INT_TRAP_INTERRUPT_EN	(1 << 3)
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR	0x2145c
 #define VCE_LMI_CTRL2			0x21474
 #define VCE_LMI_CTRL			0x21498
 #define VCE_LMI_VM_CTRL			0x214a0

+ 1 - 0
drivers/gpu/drm/radeon/radeon.h

@@ -1565,6 +1565,7 @@ struct radeon_dpm {
 	int			new_active_crtc_count;
 	u32			current_active_crtcs;
 	int			current_active_crtc_count;
+	bool single_display;
 	struct radeon_dpm_dynamic_state dyn_state;
 	struct radeon_dpm_fan fan;
 	u32 tdp_limit;

+ 7 - 3
drivers/gpu/drm/radeon/radeon_bios.c

@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
 
 static bool radeon_read_bios(struct radeon_device *rdev)
 {
-	uint8_t __iomem *bios;
+	uint8_t __iomem *bios, val1, val2;
 	size_t size;
 
 	rdev->bios = NULL;
@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
 		return false;
 	}
 
-	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+	val1 = readb(&bios[0]);
+	val2 = readb(&bios[1]);
+
+	if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
 		pci_unmap_rom(rdev->pdev, bios);
 		return false;
 	}
-	rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+	rdev->bios = kzalloc(size, GFP_KERNEL);
 	if (rdev->bios == NULL) {
 		pci_unmap_rom(rdev->pdev, bios);
 		return false;
 	}
+	memcpy_fromio(rdev->bios, bios, size);
 	pci_unmap_rom(rdev->pdev, bios);
 	return true;
 }
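
Aside: the rewrite above replaces plain dereferences and kmemdup() on an __iomem mapping with readb() and memcpy_fromio(); mapped ROM is not ordinary memory and must go through accessors on some architectures. A user-space analogue of the access pattern, with volatile reads standing in for readb() and fake_rom simulating the mapped ROM:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint8_t fake_rom[] = { 0x55, 0xaa, 0x12, 0x34 };

static uint8_t readb_sketch(const volatile uint8_t *addr)
{
	return *addr;	/* one explicit byte-sized read */
}

static void memcpy_fromio_sketch(void *dst, const volatile uint8_t *src, size_t n)
{
	uint8_t *d = dst;
	while (n--)
		*d++ = readb_sketch(src++);	/* byte-wise, never plain memcpy() */
}

int main(void)
{
	uint8_t copy[sizeof(fake_rom)];

	if (readb_sketch(&fake_rom[0]) != 0x55 ||
	    readb_sketch(&fake_rom[1]) != 0xaa) {
		printf("no ROM signature\n");
		return 1;
	}
	memcpy_fromio_sketch(copy, fake_rom, sizeof(fake_rom));
	printf("copied %zu ROM bytes\n", sizeof(copy));
	return 0;
}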

+ 4 - 7
drivers/gpu/drm/radeon/radeon_mn.c

@@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct radeon_bo *bo;
-		struct fence *fence;
 		int r;
 
 		bo = container_of(it, struct radeon_bo, mn_it);
@@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 			continue;
 		}
 
-		fence = reservation_object_get_excl(bo->tbo.resv);
-		if (fence) {
-			r = radeon_fence_wait((struct radeon_fence *)fence, false);
-			if (r)
-				DRM_ERROR("(%d) failed to wait for user bo\n", r);
-		}
+		r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
+			false, MAX_SCHEDULE_TIMEOUT);
+		if (r)
+			DRM_ERROR("(%d) failed to wait for user bo\n", r);
 
 		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);

+ 17 - 5
drivers/gpu/drm/radeon/radeon_pm.c

@@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
 	radeon_pm_compute_clocks(rdev);
 }
 
-static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
-						     enum radeon_pm_state_type dpm_state)
+static bool radeon_dpm_single_display(struct radeon_device *rdev)
 {
-	int i;
-	struct radeon_ps *ps;
-	u32 ui_class;
 	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
 		true : false;
 
@@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
 	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
 		single_display = false;
 
+	return single_display;
+}
+
+static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+						     enum radeon_pm_state_type dpm_state)
+{
+	int i;
+	struct radeon_ps *ps;
+	u32 ui_class;
+	bool single_display = radeon_dpm_single_display(rdev);
+
 	/* certain older asics have a separare 3D performance state,
 	 * so try that first if the user selected performance
 	 */
@@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 	struct radeon_ps *ps;
 	enum radeon_pm_state_type dpm_state;
 	int ret;
+	bool single_display = radeon_dpm_single_display(rdev);
 
 	/* if dpm init failed */
 	if (!rdev->pm.dpm_enabled)
@@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 		/* vce just modifies an existing state so force a change */
 		if (ps->vce_active != rdev->pm.dpm.vce_active)
 			goto force;
+		/* user has made a display change (such as timing) */
+		if (rdev->pm.dpm.single_display != single_display)
+			goto force;
 		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
 			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
 			 * all we need to do is update the display configuration.
@@ -1069,6 +1080,7 @@ force:
 
 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+	rdev->pm.dpm.single_display = single_display;
 
 	/* wait for the rings to drain */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {

+ 1 - 1
drivers/gpu/drm/radeon/radeon_ring.c

@@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
 
-	if (!ring->ready)
+	if (!ring->ring)
 		return 0;
 
 	/* print 8 dw before current rptr as often it's the last executed

+ 4 - 0
drivers/gpu/drm/radeon/radeon_ttm.c

@@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	enum dma_data_direction direction = write ?
 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
+	/* double check that we don't free the table twice */
+	if (!ttm->sg->sgl)
+		return;
+
 	/* free the sg table and pages again */
 	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 

+ 3 - 0
drivers/gpu/drm/radeon/vce_v2_0.c

@@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev)
 	WREG32(VCE_LMI_SWAP_CNTL1, 0);
 	WREG32(VCE_LMI_VM_CTRL, 0);
 
+	WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8);
+
+	addr &= 0xff;
 	size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
 	WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
 	WREG32(VCE_VCPU_CACHE_SIZE0, size);
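
Aside: the resume hunk splits a wider firmware address across two registers, programming bits 39:8 into the new 40-bit BAR and keeping only the low byte for the existing offset register. A runnable check that this split is lossless; the sample address is an assumption:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x12345678A0ULL;	/* 40-bit GPU address (example) */

	uint32_t bar40  = (uint32_t)(addr >> 8);	/* bits 39:8 -> 0x12345678 */
	uint32_t offset = (uint32_t)(addr & 0xff);	/* bits  7:0 -> 0xa0 */

	/* reconstructing proves no bits were lost in the split */
	uint64_t rebuilt = ((uint64_t)bar40 << 8) | offset;
	printf("bar=0x%x offset=0x%x rebuilt=0x%llx\n",
	       bar40, offset, (unsigned long long)rebuilt);
	return 0;
}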

+ 1 - 1
drivers/iio/accel/bma180.c

@@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
 
 	mutex_lock(&data->mutex);
 
-	for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+	for_each_set_bit(bit, indio_dev->active_scan_mask,
 			 indio_dev->masklength) {
 		ret = bma180_get_data_reg(data, bit);
 		if (ret < 0) {

Some files were not shown because too many files changed in this diff