Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Fun set of conflict resolutions here...

For the mac80211 stuff, these were fortunately just parallel
adds.  Trivially resolved.

In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
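
As a rough illustration (a hedged sketch of the resolved helper, not
necessarily character-for-character what ended up in the tree), the
moved function simply loses its error-reporting call:

	/* Sketch: 'net' moved this helper earlier in phy.c, while
	 * 'net-next' dropped the phy_error() call from it, so the merge
	 * keeps the earlier placement without the call.
	 */
	static int phy_disable_interrupts(struct phy_device *phydev)
	{
		int err;

		/* Disable PHY interrupts */
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
		if (err)
			return err;

		/* Clear the interrupt */
		return phy_clear_interrupt(phydev);
	}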

In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
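
Concretely, the resolution keeps the new member in the route-to-dst
copy while the removed one goes away; a hedged sketch of the relevant
lines in xfrm4_fill_dst() (surrounding code assumed, field names taken
from the two commits):

	/* Sketch: propagate route metadata into the xfrm bundle.  The
	 * rt_table_id copy disappears with David Ahern's cleanup, and
	 * the 'net' fix means rt_mtu_locked must now be carried over.
	 */
	xdst->u.rt.rt_gateway = rt->rt_gateway;
	xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
	xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;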

The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.

The mlx5 infiniband conflict resolution was quite non-trivial, so the
RDMA tree's merge commit was used as a guide here.  Their notes are
quoted below, followed by a rough sketch of the init/de-init stage
list they refer to:

====================

    Because bug fixes found by the syzkaller bot were taken into the
    for-rc branch after development for the 4.17 merge window had
    already started landing in the for-next branch, there were fairly
    non-trivial merge issues that needed to be resolved between the
    for-rc branch and the for-next branch.  This merge resolves those
    conflicts and provides a unified base upon which ongoing
    development for 4.17 can be based.

    Conflicts:
            drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
            (IB/mlx5: Fix cleanup order on unload), added to for-rc, and
            commit b5ca15ad7e61 (IB/mlx5: Add proper representors support),
            added as part of the devel cycle, both needed to modify the
            init/de-init functions used by mlx5.  To support the new
            representors, the new functions added by the cleanup patch
            needed to be made non-static, and the init/de-init list
            added by the representors patch needed to be modified to
            match the init/de-init list changes made by the cleanup
            patch.
    Updates:
            drivers/infiniband/hw/mlx5/mlx5_ib.h - Update the function
            prototypes added by the representors patch to reflect the new
            function names introduced by the cleanup patch.
            drivers/infiniband/hw/mlx5/ib_rep.c - Update the init/de-init
            stage list to match the new order from the cleanup patch.
====================
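
For context on what the quoted notes call the "init/de-init list": the
mlx5 IB driver builds a device by walking a table of paired
init/cleanup callbacks, and both conflicting patches edit that table.
A hedged sketch of its shape (entry names follow the mainline driver,
but treat the exact set of stages as illustrative):

	/* Sketch: each stage pairs an init function with its teardown.
	 * The representor profile in ib_rep.c has to list the same
	 * stages, in the same order, as the (now non-static) stage
	 * functions declared in mlx5_ib.h and defined in main.c.
	 */
	static const struct mlx5_ib_profile pf_profile = {
		STAGE_CREATE(MLX5_IB_STAGE_INIT,
			     mlx5_ib_stage_init_init,
			     mlx5_ib_stage_init_cleanup),
		STAGE_CREATE(MLX5_IB_STAGE_CAPS,
			     mlx5_ib_stage_caps_init,
			     NULL),
		STAGE_CREATE(MLX5_IB_STAGE_ROCE,
			     mlx5_ib_stage_roce_init,
			     mlx5_ib_stage_roce_cleanup),
		/* ... more stages ... */
	};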

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 7 years ago
commit 03fe2debbb
100 files changed, 1211 insertions, 619 deletions
  1. Documentation/ABI/testing/sysfs-ata (+100, -71)
  2. Documentation/ABI/testing/sysfs-block-device (+58, -0)
  3. Documentation/ABI/testing/sysfs-class-scsi_host (+89, -0)
  4. Documentation/accelerators/ocxl.rst (+5, -0)
  5. Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt (+1, -0)
  6. Documentation/devicetree/bindings/dma/mv-xor-v2.txt (+5, -1)
  7. Documentation/devicetree/bindings/net/dsa/marvell.txt (+25, -23)
  8. Documentation/devicetree/bindings/net/renesas,ravb.txt (+5, -1)
  9. Documentation/devicetree/bindings/usb/dwc2.txt (+1, -1)
  10. Documentation/devicetree/bindings/usb/renesas_usb3.txt (+1, -0)
  11. Documentation/devicetree/bindings/usb/renesas_usbhs.txt (+1, -0)
  12. Documentation/devicetree/bindings/usb/usb-xhci.txt (+1, -0)
  13. Documentation/networking/segmentation-offloads.txt (+14, -4)
  14. Documentation/sphinx/kerneldoc.py (+1, -2)
  15. MAINTAINERS (+10, -1)
  16. Makefile (+10, -1)
  17. arch/arm64/kernel/cpu_errata.c (+2, -2)
  18. arch/arm64/kvm/guest.c (+0, -3)
  19. arch/arm64/mm/mmu.c (+14, -4)
  20. arch/h8300/include/asm/byteorder.h (+0, -1)
  21. arch/microblaze/Kconfig (+1, -0)
  22. arch/microblaze/Kconfig.platform (+1, -1)
  23. arch/microblaze/include/asm/setup.h (+0, -1)
  24. arch/microblaze/lib/fastcopy.S (+0, -4)
  25. arch/microblaze/mm/init.c (+6, -58)
  26. arch/mips/ath25/board.c (+2, -0)
  27. arch/mips/cavium-octeon/octeon-irq.c (+2, -0)
  28. arch/mips/kernel/smp-bmips.c (+4, -4)
  29. arch/mips/loongson64/Kconfig (+6, -0)
  30. arch/parisc/kernel/cache.c (+32, -9)
  31. arch/powerpc/boot/Makefile (+2, -1)
  32. arch/powerpc/kernel/prom_init.c (+0, -1)
  33. arch/powerpc/kvm/book3s_64_mmu_radix.c (+43, -26)
  34. arch/powerpc/kvm/book3s_hv.c (+9, -8)
  35. arch/powerpc/kvm/book3s_hv_rmhandlers.S (+5, -5)
  36. arch/powerpc/kvm/powerpc.c (+3, -1)
  37. arch/s390/include/asm/mmu_context.h (+1, -0)
  38. arch/s390/kernel/entry.S (+6, -4)
  39. arch/s390/kernel/nospec-branch.c (+2, -2)
  40. arch/s390/kvm/kvm-s390.c (+2, -0)
  41. arch/sparc/mm/tlb.c (+13, -6)
  42. arch/x86/Kconfig (+1, -10)
  43. arch/x86/entry/entry_64_compat.S (+1, -15)
  44. arch/x86/entry/syscalls/syscall_32.tbl (+19, -19)
  45. arch/x86/entry/vsyscall/vsyscall_64.c (+3, -13)
  46. arch/x86/events/intel/uncore_snbep.c (+1, -1)
  47. arch/x86/ia32/sys_ia32.c (+44, -30)
  48. arch/x86/include/asm/cpufeatures.h (+2, -0)
  49. arch/x86/include/asm/microcode.h (+1, -0)
  50. arch/x86/include/asm/nospec-branch.h (+4, -1)
  51. arch/x86/include/asm/pgtable_types.h (+0, -2)
  52. arch/x86/include/asm/sections.h (+1, -0)
  53. arch/x86/include/asm/sys_ia32.h (+30, -18)
  54. arch/x86/include/asm/vmx.h (+1, -0)
  55. arch/x86/include/uapi/asm/mce.h (+1, -0)
  56. arch/x86/kernel/cpu/intel.c (+8, -2)
  57. arch/x86/kernel/cpu/mcheck/mce.c (+24, -2)
  58. arch/x86/kernel/cpu/microcode/amd.c (+21, -13)
  59. arch/x86/kernel/cpu/microcode/core.c (+135, -43)
  60. arch/x86/kernel/cpu/microcode/intel.c (+43, -9)
  61. arch/x86/kernel/ioport.c (+1, -1)
  62. arch/x86/kernel/kprobes/core.c (+9, -1)
  63. arch/x86/kernel/signal_compat.c (+65, -0)
  64. arch/x86/kernel/vm86_32.c (+2, -1)
  65. arch/x86/kernel/vmlinux.lds.S (+2, -0)
  66. arch/x86/kvm/mmu.c (+3, -1)
  67. arch/x86/kvm/vmx.c (+8, -1)
  68. arch/x86/mm/fault.c (+3, -3)
  69. arch/x86/mm/init_64.c (+28, -32)
  70. arch/x86/mm/pgtable.c (+48, -0)
  71. arch/x86/mm/pti.c (+1, -1)
  72. arch/x86/net/bpf_jit_comp.c (+2, -1)
  73. drivers/acpi/acpi_watchdog.c (+2, -2)
  74. drivers/acpi/battery.c (+3, -45)
  75. drivers/acpi/nfit/core.c (+7, -3)
  76. drivers/acpi/numa.c (+6, -4)
  77. drivers/ata/ahci.c (+3, -1)
  78. drivers/ata/libahci.c (+10, -0)
  79. drivers/ata/libahci_platform.c (+1, -1)
  80. drivers/ata/libata-core.c (+23, -3)
  81. drivers/ata/libata-eh.c (+2, -1)
  82. drivers/ata/libata-scsi.c (+10, -2)
  83. drivers/ata/sata_rcar.c (+39, -23)
  84. drivers/auxdisplay/img-ascii-lcd.c (+3, -3)
  85. drivers/auxdisplay/panel.c (+3, -3)
  86. drivers/block/loop.c (+1, -1)
  87. drivers/block/xen-blkfront.c (+8, -9)
  88. drivers/bluetooth/btusb.c (+4, -4)
  89. drivers/bluetooth/hci_bcm.c (+9, -4)
  90. drivers/clk/bcm/clk-bcm2835.c (+8, -4)
  91. drivers/clk/clk-aspeed.c (+17, -11)
  92. drivers/clk/clk.c (+28, -18)
  93. drivers/clk/hisilicon/clk-hi3660-stub.c (+2, -0)
  94. drivers/clk/imx/clk-imx51-imx53.c (+17, -3)
  95. drivers/clk/qcom/apcs-msm8916.c (+2, -3)
  96. drivers/clk/sunxi-ng/ccu-sun6i-a31.c (+3, -3)
  97. drivers/clk/ti/clk-33xx.c (+1, -1)
  98. drivers/clk/ti/clk-43xx.c (+1, -1)
  99. drivers/clk/ti/clkctrl.c (+2, -0)
  100. drivers/clocksource/Kconfig (+1, -0)

+ 100 - 71
Documentation/ABI/testing/sysfs-ata

@@ -1,110 +1,139 @@
 What:		/sys/class/ata_...
 What:		/sys/class/ata_...
-Date:		August 2008
-Contact:	Gwendal Grignou<gwendal@google.com>
 Description:
 Description:
-
-Provide a place in sysfs for storing the ATA topology of the system.  This allows
-retrieving various information about ATA objects.
+		Provide a place in sysfs for storing the ATA topology of the
+		system. This allows retrieving various information about ATA
+		objects.
 
 
 Files under /sys/class/ata_port
 Files under /sys/class/ata_port
 -------------------------------
 -------------------------------
 
 
-	For each port, a directory ataX is created where X is the ata_port_id of
-	the port. The device parent is the ata host device.
+For each port, a directory ataX is created where X is the ata_port_id of the
+port. The device parent is the ata host device.
 
 
-idle_irq (read)
 
 
-	Number of IRQ received by the port while idle [some ata HBA only].
+What:		/sys/class/ata_port/ataX/nr_pmp_links
+What:		/sys/class/ata_port/ataX/idle_irq
+Date:		May, 2010
+KernelVersion:	v2.6.37
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		nr_pmp_links:	(RO) If a SATA Port Multiplier (PM) is
+				connected, the number of links behind it.
 
 
-nr_pmp_links (read)
+		idle_irq:	(RO) Number of IRQ received by the port while
+				idle [some ata HBA only].
 
 
-	If a SATA Port Multiplier (PM) is connected, number of link behind it.
+
+What:		/sys/class/ata_port/ataX/port_no
+Date:		May, 2013
+KernelVersion:	v3.11
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		(RO) Host local port number. While registering host controller,
+		port numbers are tracked based upon number of ports available on
+		the controller. This attribute is needed by udev for composing
+		persistent links in /dev/disk/by-path.
 
 
 Files under /sys/class/ata_link
 Files under /sys/class/ata_link
 -------------------------------
 -------------------------------
 
 
-	Behind each port, there is a ata_link. If there is a SATA PM in the
-	topology, 15 ata_link objects are created.
-
-	If a link is behind a port, the directory name is linkX, where X is
-	ata_port_id of the port.
-	If a link is behind a PM, its name is linkX.Y where X is ata_port_id
-	of the parent port and Y the PM port.
+Behind each port, there is a ata_link. If there is a SATA PM in the topology, 15
+ata_link objects are created.
 
 
-hw_sata_spd_limit
+If a link is behind a port, the directory name is linkX, where X is ata_port_id
+of the port. If a link is behind a PM, its name is linkX.Y where X is
+ata_port_id of the parent port and Y the PM port.
 
 
-	Maximum speed supported by the connected SATA device.
 
 
-sata_spd_limit
+What:		/sys/class/ata_link/linkX[.Y]/hw_sata_spd_limit
+What:		/sys/class/ata_link/linkX[.Y]/sata_spd_limit
+What:		/sys/class/ata_link/linkX[.Y]/sata_spd
+Date:		May, 2010
+KernelVersion:	v2.6.37
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		hw_sata_spd_limit:	(RO) Maximum speed supported by the
+					connected SATA device.
 
 
-	Maximum speed imposed by libata.
+		sata_spd_limit:		(RO) Maximum speed imposed by libata.
 
 
-sata_spd
+		sata_spd:		(RO) Current speed of the link
+					eg. 1.5, 3 Gbps etc.
 
 
-	Current speed of the link [1.5, 3Gps,...].
 
 
 Files under /sys/class/ata_device
 Files under /sys/class/ata_device
 ---------------------------------
 ---------------------------------
 
 
-	Behind each link, up to two ata device are created.
-	The name of the directory is devX[.Y].Z where:
-	- X is ata_port_id of the port where the device is connected,
-	- Y the port of the PM if any, and
-	- Z the device id: for PATA, there is usually 2 devices [0,1],
-	only 1 for SATA.
-
-class
-	Device class. Can be "ata" for disk, "atapi" for packet device,
-	"pmp" for PM, or "none" if no device was found behind the link.
-
-dma_mode
+Behind each link, up to two ata devices are created.
+The name of the directory is devX[.Y].Z where:
+- X is ata_port_id of the port where the device is connected,
+- Y the port of the PM if any, and
+- Z the device id: for PATA, there is usually 2 devices [0,1], only 1 for SATA.
+
+
+What:		/sys/class/ata_device/devX[.Y].Z/spdn_cnt
+What:		/sys/class/ata_device/devX[.Y].Z/gscr
+What:		/sys/class/ata_device/devX[.Y].Z/ering
+What:		/sys/class/ata_device/devX[.Y].Z/id
+What:		/sys/class/ata_device/devX[.Y].Z/pio_mode
+What:		/sys/class/ata_device/devX[.Y].Z/xfer_mode
+What:		/sys/class/ata_device/devX[.Y].Z/dma_mode
+What:		/sys/class/ata_device/devX[.Y].Z/class
+Date:		May, 2010
+KernelVersion:	v2.6.37
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		spdn_cnt:	(RO) Number of times libata decided to lower the
+				speed of link due to errors.
 
 
-	Transfer modes supported by the device when in DMA mode.
-	Mostly used by PATA device.
+		gscr:		(RO) Cached result of the dump of PM GSCR
+				register. Valid registers are:
 
 
-pio_mode
+				0:      SATA_PMP_GSCR_PROD_ID,
+				1:	SATA_PMP_GSCR_REV,
+				2:      SATA_PMP_GSCR_PORT_INFO,
+				32:     SATA_PMP_GSCR_ERROR,
+				33:     SATA_PMP_GSCR_ERROR_EN,
+				64:     SATA_PMP_GSCR_FEAT,
+				96:     SATA_PMP_GSCR_FEAT_EN,
+				130:    SATA_PMP_GSCR_SII_GPIO
 
 
-	Transfer modes supported by the device when in PIO mode.
-	Mostly used by PATA device.
+				Only valid if the device is a PM.
 
 
-xfer_mode
+		ering:		(RO) Formatted output of the error ring of the
+				device.
 
 
-	Current transfer mode.
+		id:		(RO) Cached result of IDENTIFY command, as
+				described in ATA8 7.16 and 7.17. Only valid if
+				the device is not a PM.
 
 
-id
+		pio_mode:	(RO) Transfer modes supported by the device when
+				in PIO mode. Mostly used by PATA device.
 
 
-	Cached result of IDENTIFY command, as described in ATA8 7.16 and 7.17.
-	Only valid if the device is not a PM.
+		xfer_mode:	(RO) Current transfer mode
 
 
-gscr
+		dma_mode:	(RO) Transfer modes supported by the device when
+				in DMA mode. Mostly used by PATA device.
 
 
-	Cached result of the dump of PM GSCR register.
-	Valid registers are:
-	0: 	SATA_PMP_GSCR_PROD_ID,
-	1: 	SATA_PMP_GSCR_REV,
-	2: 	SATA_PMP_GSCR_PORT_INFO,
-	32:	SATA_PMP_GSCR_ERROR,
-	33:	SATA_PMP_GSCR_ERROR_EN,
-	64:	SATA_PMP_GSCR_FEAT,
-	96:	SATA_PMP_GSCR_FEAT_EN,
-	130:	SATA_PMP_GSCR_SII_GPIO
-	Only valid if the device is a PM.
+		class:		(RO) Device class. Can be "ata" for disk,
+				"atapi" for packet device, "pmp" for PM, or
+				"none" if no device was found behind the link.
 
 
-trim
 
 
-	Shows the DSM TRIM mode currently used by the device. Valid
-	values are:
-	unsupported:		Drive does not support DSM TRIM
-	unqueued:		Drive supports unqueued DSM TRIM only
-	queued:			Drive supports queued DSM TRIM
-	forced_unqueued:	Drive's queued DSM support is known to be
-				buggy and only unqueued TRIM commands
-				are sent
+What:		/sys/class/ata_device/devX[.Y].Z/trim
+Date:		May, 2015
+KernelVersion:	v4.10
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		(RO) Shows the DSM TRIM mode currently used by the device. Valid
+		values are:
 
 
-spdn_cnt
+		unsupported:		Drive does not support DSM TRIM
 
 
-	Number of time libata decided to lower the speed of link due to errors.
+		unqueued:               Drive supports unqueued DSM TRIM only
 
 
-ering
+		queued:                 Drive supports queued DSM TRIM
 
 
-	Formatted output of the error ring of the device.
+		forced_unqueued:	Drive's queued DSM support is known to
+					be buggy and only unqueued TRIM commands
+					are sent

+ 58 - 0
Documentation/ABI/testing/sysfs-block-device

@@ -0,0 +1,58 @@
+What:		/sys/block/*/device/sw_activity
+Date:		Jun, 2008
+KernelVersion:	v2.6.27
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) Used by drivers which support software controlled activity
+		LEDs.
+
+		It has the following valid values:
+
+		0	OFF - the LED is not activated on activity
+		1	BLINK_ON - the LED blinks on every 10ms when activity is
+			detected.
+		2	BLINK_OFF - the LED is on when idle, and blinks off
+			every 10ms when activity is detected.
+
+		Note that the user must turn sw_activity OFF it they wish to
+		control the activity LED via the em_message file.
+
+
+What:		/sys/block/*/device/unload_heads
+Date:		Sep, 2008
+KernelVersion:	v2.6.28
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) Hard disk shock protection
+
+		Writing an integer value to this file will take the heads of the
+		respective drive off the platter and block all I/O operations
+		for the specified number of milliseconds.
+
+		- If the device does not support the unload heads feature,
+		  access is denied with -EOPNOTSUPP.
+		- The maximal value accepted for a timeout is 30000
+		  milliseconds.
+		- A previously set timeout can be cancelled and disk can resume
+		  normal operation immediately by specifying a timeout of 0.
+		- Some hard drives only comply with an earlier version of the
+		  ATA standard, but support the unload feature nonetheless.
+		  There is no safe way Linux can detect these devices, so this
+		  is not enabled by default. If it is known that your device
+		  does support the unload feature, then you can tell the kernel
+		  to enable it by writing -1. It can be disabled again by
+		  writing -2.
+		- Values below -2 are rejected with -EINVAL
+
+		For more information, see
+		Documentation/laptops/disk-shock-protection.txt
+
+
+What:		/sys/block/*/device/ncq_prio_enable
+Date:		Oct, 2016
+KernelVersion:	v4.10
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) Write to the file to turn on or off the SATA ncq (native
+		command queueing) support. By default this feature is turned
+		off.

+ 89 - 0
Documentation/ABI/testing/sysfs-class-scsi_host

@@ -27,3 +27,92 @@ Description:	This file contains the current status of the "SSD Smart Path"
 		the direct i/o path to physical devices.  This setting is
 		controller wide, affecting all configured logical drives on the
 		controller.  This file is readable and writable.
+
+What:		/sys/class/scsi_host/hostX/link_power_management_policy
+Date:		Oct, 2007
+KernelVersion:	v2.6.24
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) This parameter allows the user to read and set the link
+		(interface) power management.
+
+		There are four possible options:
+
+		min_power: Tell the controller to try to make the link use the
+		least possible power when possible. This may sacrifice some
+		performance due to increased latency when coming out of lower
+		power states.
+
+		max_performance: Generally, this means no power management.
+		Tell the controller to have performance be a priority over power
+		management.
+
+		medium_power: Tell the controller to enter a lower power state
+		when possible, but do not enter the lowest power state, thus
+		improving latency over min_power setting.
+
+		med_power_with_dipm: Identical to the existing medium_power
+		setting except that it enables dipm (device initiated power
+		management) on top, which makes it match the Windows IRST (Intel
+		Rapid Storage Technology) driver settings. This setting is also
+		close to min_power, except that:
+		a) It does not use host-initiated slumber mode, but it does
+		allow device-initiated slumber
+		b) It does not enable low power device sleep mode (DevSlp).
+
+What:		/sys/class/scsi_host/hostX/em_message
+What:		/sys/class/scsi_host/hostX/em_message_type
+Date:		Jun, 2008
+KernelVersion:	v2.6.27
+Contact:	linux-ide@vger.kernel.org
+Description:
+		em_message: (RW) Enclosure management support. For the LED
+		protocol, writes and reads correspond to the LED message format
+		as defined in the AHCI spec.
+
+		The user must turn sw_activity (under /sys/block/*/device/) OFF
+		it they wish to control the activity LED via the em_message
+		file.
+
+		em_message_type: (RO) Displays the current enclosure management
+		protocol that is being used by the driver (for eg. LED, SAF-TE,
+		SES-2, SGPIO etc).
+
+What:		/sys/class/scsi_host/hostX/ahci_port_cmd
+What:		/sys/class/scsi_host/hostX/ahci_host_caps
+What:		/sys/class/scsi_host/hostX/ahci_host_cap2
+Date:		Mar, 2010
+KernelVersion:	v2.6.35
+Contact:	linux-ide@vger.kernel.org
+Description:
+		[to be documented]
+
+What:		/sys/class/scsi_host/hostX/ahci_host_version
+Date:		Mar, 2010
+KernelVersion:	v2.6.35
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RO) Display the version of the AHCI spec implemented by the
+		host.
+
+What:		/sys/class/scsi_host/hostX/em_buffer
+Date:		Apr, 2010
+KernelVersion:	v2.6.35
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) Allows access to AHCI EM (enclosure management) buffer
+		directly if the host supports EM.
+
+		For eg. the AHCI driver supports SGPIO EM messages but the
+		SATA/AHCI specs do not define the SGPIO message format of the EM
+		buffer. Different hardware(HW) vendors may have different
+		definitions. With the em_buffer attribute, this issue can be
+		solved by allowing HW vendors to provide userland drivers and
+		tools for their SGPIO initiators.
+
+What:		/sys/class/scsi_host/hostX/em_message_supported
+Date:		Oct, 2009
+KernelVersion:	v2.6.39
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RO) Displays supported enclosure management message types.

+ 5 - 0
Documentation/accelerators/ocxl.rst

@@ -152,6 +152,11 @@ OCXL_IOCTL_IRQ_SET_FD:
   Associate an event fd to an AFU interrupt so that the user process
   can be notified when the AFU sends an interrupt.
 
+OCXL_IOCTL_GET_METADATA:
+
+  Obtains configuration information from the card, such at the size of
+  MMIO areas, the AFU version, and the PASID for the current context.
+
 
 mmap
 ----

+ 1 - 0
Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt

@@ -16,6 +16,7 @@ Required properties:
 - ddc: phandle to the hdmi ddc node
 - phy: phandle to the hdmi phy node
 - samsung,syscon-phandle: phandle for system controller node for PMU.
+- #sound-dai-cells: should be 0.
 
 Required properties for Exynos 4210, 4212, 5420 and 5433:
 - clocks: list of clock IDs from SoC clock driver.

+ 5 - 1
Documentation/devicetree/bindings/dma/mv-xor-v2.txt

@@ -11,7 +11,11 @@ Required properties:
   interrupts.
   interrupts.
 
 
 Optional properties:
 Optional properties:
-- clocks: Optional reference to the clock used by the XOR engine.
+- clocks: Optional reference to the clocks used by the XOR engine.
+- clock-names: mandatory if there is a second clock, in this case the
+   name must be "core" for the first clock and "reg" for the second
+   one
+
 
 
 Example:
 Example:
 
 

+ 25 - 23
Documentation/devicetree/bindings/net/dsa/marvell.txt

@@ -59,14 +59,15 @@ Example:
 			compatible = "marvell,mv88e6085";
 			compatible = "marvell,mv88e6085";
 			reg = <0>;
 			reg = <0>;
 			reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
 			reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
-		};
-		mdio {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			switch1phy0: switch1phy0@0 {
-				reg = <0>;
-				interrupt-parent = <&switch0>;
-				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+			mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				switch1phy0: switch1phy0@0 {
+					reg = <0>;
+					interrupt-parent = <&switch0>;
+					interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+				};
 			};
 			};
 		};
 		};
 	};
 	};
@@ -83,23 +84,24 @@ Example:
 			compatible = "marvell,mv88e6390";
 			compatible = "marvell,mv88e6390";
 			reg = <0>;
 			reg = <0>;
 			reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
 			reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
-		};
-		mdio {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			switch1phy0: switch1phy0@0 {
-				reg = <0>;
-				interrupt-parent = <&switch0>;
-				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+			mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				switch1phy0: switch1phy0@0 {
+					reg = <0>;
+					interrupt-parent = <&switch0>;
+					interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+				};
 			};
 			};
-		};
 
 
-		mdio1 {
-			compatible = "marvell,mv88e6xxx-mdio-external";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			switch1phy9: switch1phy0@9 {
-				reg = <9>;
+			mdio1 {
+				compatible = "marvell,mv88e6xxx-mdio-external";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				switch1phy9: switch1phy0@9 {
+					reg = <9>;
+				};
 			};
 			};
 		};
 		};
 	};
 	};

+ 5 - 1
Documentation/devicetree/bindings/net/renesas,ravb.txt

@@ -27,7 +27,11 @@ Required properties:
 	SoC-specific version corresponding to the platform first followed by
 	SoC-specific version corresponding to the platform first followed by
 	the generic version.
 	the generic version.
 
 
-- reg: offset and length of (1) the register block and (2) the stream buffer.
+- reg: Offset and length of (1) the register block and (2) the stream buffer.
+       The region for the register block is mandatory.
+       The region for the stream buffer is optional, as it is only present on
+       R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A7796),
+       and M3-N (R8A77965).
 - interrupts: A list of interrupt-specifiers, one for each entry in
 - interrupts: A list of interrupt-specifiers, one for each entry in
 	      interrupt-names.
 	      interrupt-names.
 	      If interrupt-names is not present, an interrupt specifier
 	      If interrupt-names is not present, an interrupt specifier

+ 1 - 1
Documentation/devicetree/bindings/usb/dwc2.txt

@@ -19,7 +19,7 @@ Required properties:
   configured in FS mode;
   - "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs
   configured in HS mode;
-  - "st,stm32f7xx-hsotg": The DWC2 USB HS controller instance in STM32F7xx SoCs
+  - "st,stm32f7-hsotg": The DWC2 USB HS controller instance in STM32F7 SoCs
     configured in HS mode;
 - reg : Should contain 1 register range (address and length)
 - interrupts : Should contain 1 interrupt

+ 1 - 0
Documentation/devicetree/bindings/usb/renesas_usb3.txt

@@ -4,6 +4,7 @@ Required properties:
   - compatible: Must contain one of the following:
 	- "renesas,r8a7795-usb3-peri"
 	- "renesas,r8a7796-usb3-peri"
+	- "renesas,r8a77965-usb3-peri"
 	- "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible
 	  device
 

+ 1 - 0
Documentation/devicetree/bindings/usb/renesas_usbhs.txt

@@ -12,6 +12,7 @@ Required properties:
 	- "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device
 	- "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
 	- "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
+	- "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device
 	- "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
 	- "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
 	- "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices

+ 1 - 0
Documentation/devicetree/bindings/usb/usb-xhci.txt

@@ -13,6 +13,7 @@ Required properties:
     - "renesas,xhci-r8a7793" for r8a7793 SoC
     - "renesas,xhci-r8a7795" for r8a7795 SoC
     - "renesas,xhci-r8a7796" for r8a7796 SoC
+    - "renesas,xhci-r8a77965" for r8a77965 SoC
     - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
       device
     - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device

+ 14 - 4
Documentation/networking/segmentation-offloads.txt

@@ -20,8 +20,8 @@ TCP Segmentation Offload
 
 
 TCP segmentation allows a device to segment a single frame into multiple
 TCP segmentation allows a device to segment a single frame into multiple
 frames with a data payload size specified in skb_shinfo()->gso_size.
 frames with a data payload size specified in skb_shinfo()->gso_size.
-When TCP segmentation requested the bit for either SKB_GSO_TCP or
-SKB_GSO_TCP6 should be set in skb_shinfo()->gso_type and
+When TCP segmentation requested the bit for either SKB_GSO_TCPV4 or
+SKB_GSO_TCPV6 should be set in skb_shinfo()->gso_type and
 skb_shinfo()->gso_size should be set to a non-zero value.
 skb_shinfo()->gso_size should be set to a non-zero value.
 
 
 TCP segmentation is dependent on support for the use of partial checksum
 TCP segmentation is dependent on support for the use of partial checksum
@@ -153,8 +153,18 @@ To signal this, gso_size is set to the special value GSO_BY_FRAGS.
 
 
 Therefore, any code in the core networking stack must be aware of the
 Therefore, any code in the core networking stack must be aware of the
 possibility that gso_size will be GSO_BY_FRAGS and handle that case
 possibility that gso_size will be GSO_BY_FRAGS and handle that case
-appropriately. (For size checks, the skb_gso_validate_*_len family of
-helpers do this automatically.)
+appropriately.
+
+There are some helpers to make this easier:
+
+ - skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if
+   an skb is an SCTP GSO skb.
+
+ - For size checks, the skb_gso_validate_*_len family of helpers correctly
+   considers GSO_BY_FRAGS.
+
+ - For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
+   will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
 
 
 This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
 This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
 set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
 set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.

+ 1 - 2
Documentation/sphinx/kerneldoc.py

@@ -36,8 +36,7 @@ import glob
 
 
 from docutils import nodes, statemachine
 from docutils.statemachine import ViewList
-from docutils.parsers.rst import directives
-from sphinx.util.compat import Directive
+from docutils.parsers.rst import directives, Directive
 from sphinx.ext.autodoc import AutodocReporter
 
 __version__  = '1.0'

+ 10 - 1
MAINTAINERS

@@ -9941,6 +9941,13 @@ F:	Documentation/ABI/stable/sysfs-bus-nvmem
 F:	include/linux/nvmem-consumer.h
 F:	include/linux/nvmem-consumer.h
 F:	include/linux/nvmem-provider.h
 F:	include/linux/nvmem-provider.h
 
 
+NXP SGTL5000 DRIVER
+M:	Fabio Estevam <fabio.estevam@nxp.com>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/devicetree/bindings/sound/sgtl5000.txt
+F:	sound/soc/codecs/sgtl5000*
+
 NXP TDA998X DRM DRIVER
 NXP TDA998X DRM DRIVER
 M:	Russell King <linux@armlinux.org.uk>
 M:	Russell King <linux@armlinux.org.uk>
 S:	Supported
 S:	Supported
@@ -10343,7 +10350,7 @@ F:	drivers/oprofile/
 F:	include/linux/oprofile.h
 F:	include/linux/oprofile.h
 
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
-M:	Mark Fasheh <mfasheh@versity.com>
+M:	Mark Fasheh <mark@fasheh.com>
 M:	Joel Becker <jlbec@evilplan.org>
 M:	Joel Becker <jlbec@evilplan.org>
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:	http://ocfs2.wiki.kernel.org
 W:	http://ocfs2.wiki.kernel.org
@@ -10853,6 +10860,7 @@ F:	drivers/platform/x86/peaq-wmi.c
 PER-CPU MEMORY ALLOCATOR
 PER-CPU MEMORY ALLOCATOR
 M:	Tejun Heo <tj@kernel.org>
 M:	Tejun Heo <tj@kernel.org>
 M:	Christoph Lameter <cl@linux.com>
 M:	Christoph Lameter <cl@linux.com>
+M:	Dennis Zhou <dennisszhou@gmail.com>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
 S:	Maintained
 S:	Maintained
 F:	include/linux/percpu*.h
 F:	include/linux/percpu*.h
@@ -12123,6 +12131,7 @@ M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 S:	Supported
 F:	sound/soc/samsung/
 F:	sound/soc/samsung/
+F:	Documentation/devicetree/bindings/sound/samsung*
 
 
 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
 M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Krzysztof Kozlowski <krzk@kernel.org>

+ 10 - 1
Makefile

@@ -2,7 +2,7 @@
 VERSION = 4
 VERSION = 4
 PATCHLEVEL = 16
 PATCHLEVEL = 16
 SUBLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Fearless Coyote
 NAME = Fearless Coyote
 
 
 # *DOCUMENTATION*
 # *DOCUMENTATION*
@@ -826,6 +826,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable invalid "can't wrap" optimizations for signed / pointers
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS	+= $(call cc-option,-fno-strict-overflow)
 KBUILD_CFLAGS	+= $(call cc-option,-fno-strict-overflow)
 
 
+# clang sets -fmerge-all-constants by default as optimization, but this
+# is non-conforming behavior for C and in fact breaks the kernel, so we
+# need to disable it here generally.
+KBUILD_CFLAGS	+= $(call cc-option,-fno-merge-all-constants)
+
+# for gcc -fno-merge-all-constants disables everything, but it is fine
+# to have actual conforming behavior enabled.
+KBUILD_CFLAGS	+= $(call cc-option,-fmerge-constants)
+
 # Make sure -fstack-check isn't enabled (like gentoo apparently did)
 # Make sure -fstack-check isn't enabled (like gentoo apparently did)
 KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
 KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
 
 

+ 2 - 2
arch/arm64/kernel/cpu_errata.c

@@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data)
 	case PSCI_CONDUIT_HVC:
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 			return 0;
 		cb = call_hvc_arch_workaround_1;
 		cb = call_hvc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_hvc_start;
 		smccc_start = __smccc_workaround_1_hvc_start;
@@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data)
 	case PSCI_CONDUIT_SMC:
 	case PSCI_CONDUIT_SMC:
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 			return 0;
 		cb = call_smc_arch_workaround_1;
 		cb = call_smc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_smc_start;
 		smccc_start = __smccc_workaround_1_smc_start;

+ 0 - 3
arch/arm64/kvm/guest.c

@@ -363,8 +363,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 {
 {
 	int ret = 0;
 	int ret = 0;
 
 
-	vcpu_load(vcpu);
-
 	trace_kvm_set_guest_debug(vcpu, dbg->control);
 	trace_kvm_set_guest_debug(vcpu, dbg->control);
 
 
 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
@@ -386,7 +384,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	}
 	}
 
 
 out:
 out:
-	vcpu_put(vcpu);
 	return ret;
 	return ret;
 }
 }
 
 

+ 14 - 4
arch/arm64/mm/mmu.c

@@ -108,7 +108,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	 * The following mapping attributes may be updated in live
 	 * The following mapping attributes may be updated in live
 	 * kernel mappings without the need for break-before-make.
 	 * kernel mappings without the need for break-before-make.
 	 */
 	 */
-	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
 
 
 	/* creating or taking down mappings is always safe */
 	/* creating or taking down mappings is always safe */
 	if (old == 0 || new == 0)
 	if (old == 0 || new == 0)
@@ -118,9 +118,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	if ((old | new) & PTE_CONT)
 	if ((old | new) & PTE_CONT)
 		return false;
 		return false;
 
 
-	/* Transitioning from Global to Non-Global is safe */
-	if (((old ^ new) == PTE_NG) && (new & PTE_NG))
-		return true;
+	/* Transitioning from Non-Global to Global is unsafe */
+	if (old & ~new & PTE_NG)
+		return false;
 
 
 	return ((old ^ new) & ~mask) == 0;
 	return ((old ^ new) & ~mask) == 0;
 }
 }
@@ -972,3 +972,13 @@ int pmd_clear_huge(pmd_t *pmdp)
 	pmd_clear(pmdp);
 	pmd_clear(pmdp);
 	return 1;
 	return 1;
 }
 }
+
+int pud_free_pmd_page(pud_t *pud)
+{
+	return pud_none(*pud);
+}
+
+int pmd_free_pte_page(pmd_t *pmd)
+{
+	return pmd_none(*pmd);
+}

+ 0 - 1
arch/h8300/include/asm/byteorder.h

@@ -2,7 +2,6 @@
 #ifndef __H8300_BYTEORDER_H__
 #define __H8300_BYTEORDER_H__
 
-#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
 #include <linux/byteorder/big_endian.h>
 
 #endif

+ 1 - 0
arch/microblaze/Kconfig

@@ -24,6 +24,7 @@ config MICROBLAZE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_TRACER
+	select NO_BOOTMEM
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_OPROFILE
 	select HAVE_OPROFILE

+ 1 - 1
arch/microblaze/Kconfig.platform

@@ -8,7 +8,6 @@ menu "Platform options"
 
 
 config OPT_LIB_FUNCTION
 config OPT_LIB_FUNCTION
 	bool "Optimalized lib function"
 	bool "Optimalized lib function"
-	depends on CPU_LITTLE_ENDIAN
 	default y
 	default y
 	help
 	help
 	  Allows turn on optimalized library function (memcpy and memmove).
 	  Allows turn on optimalized library function (memcpy and memmove).
@@ -21,6 +20,7 @@ config OPT_LIB_FUNCTION
 config OPT_LIB_ASM
 config OPT_LIB_ASM
 	bool "Optimalized lib function ASM"
 	bool "Optimalized lib function ASM"
 	depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
 	depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
+	depends on CPU_BIG_ENDIAN
 	default n
 	default n
 	help
 	help
 	  Allows turn on optimalized library function (memcpy and memmove).
 	  Allows turn on optimalized library function (memcpy and memmove).

+ 0 - 1
arch/microblaze/include/asm/setup.h

@@ -44,7 +44,6 @@ void machine_shutdown(void);
 void machine_halt(void);
 void machine_halt(void);
 void machine_power_off(void);
 void machine_power_off(void);
 
 
-extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
 
 # endif /* __ASSEMBLY__ */
 # endif /* __ASSEMBLY__ */

+ 0 - 4
arch/microblaze/lib/fastcopy.S

@@ -29,10 +29,6 @@
  *	between mem locations with size of xfer spec'd in bytes
  *	between mem locations with size of xfer spec'd in bytes
  */
  */
 
 
-#ifdef __MICROBLAZEEL__
-#error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM.
-#endif
-
 #include <linux/linkage.h>
 #include <linux/linkage.h>
 	.text
 	.text
 	.globl	memcpy
 	.globl	memcpy

+ 6 - 58
arch/microblaze/mm/init.c

@@ -32,9 +32,6 @@ int mem_init_done;
 #ifndef CONFIG_MMU
 #ifndef CONFIG_MMU
 unsigned int __page_offset;
 unsigned int __page_offset;
 EXPORT_SYMBOL(__page_offset);
 EXPORT_SYMBOL(__page_offset);
-
-#else
-static int init_bootmem_done;
 #endif /* CONFIG_MMU */
 #endif /* CONFIG_MMU */
 
 
 char *klimit = _end;
 char *klimit = _end;
@@ -117,7 +114,6 @@ static void __init paging_init(void)
 
 
 void __init setup_memory(void)
 void __init setup_memory(void)
 {
 {
-	unsigned long map_size;
 	struct memblock_region *reg;
 	struct memblock_region *reg;
 
 
 #ifndef CONFIG_MMU
 #ifndef CONFIG_MMU
@@ -174,17 +170,6 @@ void __init setup_memory(void)
 	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
 	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
 	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 
 
-	/*
-	 * Find an area to use for the bootmem bitmap.
-	 * We look for the first area which is at least
-	 * 128kB in length (128kB is enough for a bitmap
-	 * for 4GB of memory, using 4kB pages), plus 1 page
-	 * (in case the address isn't page-aligned).
-	 */
-	map_size = init_bootmem_node(NODE_DATA(0),
-		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
-	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
-
 	/* Add active regions with valid PFNs */
 	/* Add active regions with valid PFNs */
 	for_each_memblock(memory, reg) {
 	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
 		unsigned long start_pfn, end_pfn;
@@ -196,32 +181,9 @@ void __init setup_memory(void)
 				  &memblock.memory, 0);
 				  &memblock.memory, 0);
 	}
 	}
 
 
-	/* free bootmem is whole main memory */
-	free_bootmem_with_active_regions(0, max_low_pfn);
-
-	/* reserve allocate blocks */
-	for_each_memblock(reserved, reg) {
-		unsigned long top = reg->base + reg->size - 1;
-
-		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
-			 (u32) reg->base, (u32) reg->size, top,
-						memory_start + lowmem_size - 1);
-
-		if (top <= (memory_start + lowmem_size - 1)) {
-			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
-		} else if (reg->base < (memory_start + lowmem_size - 1)) {
-			unsigned long trunc_size = memory_start + lowmem_size -
-								reg->base;
-			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
-		}
-	}
-
 	/* XXX need to clip this if using highmem? */
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
 	sparse_memory_present_with_active_regions(0);
 
 
-#ifdef CONFIG_MMU
-	init_bootmem_done = 1;
-#endif
 	paging_init();
 	paging_init();
 }
 }
 
 
@@ -398,30 +360,16 @@ asmlinkage void __init mmu_init(void)
 /* This is only called until mem_init is done. */
 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 void __init *early_get_page(void)
 {
 {
-	void *p;
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		/*
-		 * Mem start + kernel_tlb -> here is limit
-		 * because of mem mapping from head.S
-		 */
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-					memory_start + kernel_tlb));
-	}
-	return p;
+	/*
+	 * Mem start + kernel_tlb -> here is limit
+	 * because of mem mapping from head.S
+	 */
+	return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+				memory_start + kernel_tlb));
 }
 }
 
 
 #endif /* CONFIG_MMU */
 #endif /* CONFIG_MMU */
 
 
-void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
-{
-	if (mem_init_done)
-		return kmalloc(size, mask);
-	else
-		return alloc_bootmem(size);
-}
-
 void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 {
 	void *p;
 	void *p;

+ 2 - 0
arch/mips/ath25/board.c

@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
 	}
 	}
 
 
 	board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
 	board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
+	if (!board_data)
+		goto error;
 	ath25_board.config = (struct ath25_boarddata *)board_data;
 	ath25_board.config = (struct ath25_boarddata *)board_data;
 	memcpy_fromio(board_data, bcfg, 0x100);
 	memcpy_fromio(board_data, bcfg, 0x100);
 	if (broken_boarddata) {
 	if (broken_boarddata) {

+ 2 - 0
arch/mips/cavium-octeon/octeon-irq.c

@@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
 	}
 	}
 
 
 	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
 	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+	if (!host_data)
+		return -ENOMEM;
 	raw_spin_lock_init(&host_data->lock);
 	raw_spin_lock_init(&host_data->lock);
 
 
 	addr = of_get_address(ciu_node, 0, NULL, NULL);
 	addr = of_get_address(ciu_node, 0, NULL, NULL);

+ 4 - 4
arch/mips/kernel/smp-bmips.c

@@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
 		return;
 		return;
 	}
 	}
 
 
-	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-			"smp_ipi0", NULL))
+	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
 		panic("Can't request IPI0 interrupt");
 		panic("Can't request IPI0 interrupt");
-	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-			"smp_ipi1", NULL))
+	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
 		panic("Can't request IPI1 interrupt");
 		panic("Can't request IPI1 interrupt");
 }
 }
 
 

+ 6 - 0
arch/mips/loongson64/Kconfig

@@ -7,6 +7,8 @@ choice
 config LEMOTE_FULOONG2E
 config LEMOTE_FULOONG2E
 	bool "Lemote Fuloong(2e) mini-PC"
 	bool "Lemote Fuloong(2e) mini-PC"
 	select ARCH_SPARSEMEM_ENABLE
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select CEVT_R4K
 	select CEVT_R4K
 	select CSRC_R4K
 	select CSRC_R4K
 	select SYS_HAS_CPU_LOONGSON2E
 	select SYS_HAS_CPU_LOONGSON2E
@@ -33,6 +35,8 @@ config LEMOTE_FULOONG2E
 config LEMOTE_MACH2F
 config LEMOTE_MACH2F
 	bool "Lemote Loongson 2F family machines"
 	bool "Lemote Loongson 2F family machines"
 	select ARCH_SPARSEMEM_ENABLE
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select BOARD_SCACHE
 	select BOARD_SCACHE
 	select BOOT_ELF32
 	select BOOT_ELF32
 	select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
 	select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
@@ -62,6 +66,8 @@ config LEMOTE_MACH2F
 config LOONGSON_MACH3X
 config LOONGSON_MACH3X
 	bool "Generic Loongson 3 family machines"
 	bool "Generic Loongson 3 family machines"
 	select ARCH_SPARSEMEM_ENABLE
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select GENERIC_ISA_DMA_SUPPORT_BROKEN
 	select GENERIC_ISA_DMA_SUPPORT_BROKEN
 	select BOOT_ELF32
 	select BOOT_ELF32
 	select BOARD_SCACHE
 	select BOARD_SCACHE

+ 32 - 9
arch/parisc/kernel/cache.c

@@ -543,7 +543,8 @@ void flush_cache_mm(struct mm_struct *mm)
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
 	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
-		flush_tlb_all();
+		if (mm->context)
+			flush_tlb_all();
 		flush_cache_all();
 		flush_cache_all();
 		return;
 		return;
 	}
 	}
@@ -571,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm)
 			pfn = pte_pfn(*ptep);
 			pfn = pte_pfn(*ptep);
 			if (!pfn_valid(pfn))
 			if (!pfn_valid(pfn))
 				continue;
 				continue;
+			if (unlikely(mm->context))
+				flush_tlb_page(vma, addr);
 			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 		}
 		}
 	}
 	}
@@ -579,26 +582,46 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 		unsigned long start, unsigned long end)
 {
 {
+	pgd_t *pgd;
+	unsigned long addr;
+
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    end - start >= parisc_cache_flush_threshold) {
 	    end - start >= parisc_cache_flush_threshold) {
-		flush_tlb_range(vma, start, end);
+		if (vma->vm_mm->context)
+			flush_tlb_range(vma, start, end);
 		flush_cache_all();
 		flush_cache_all();
 		return;
 		return;
 	}
 	}
 
 
-	flush_user_dcache_range_asm(start, end);
-	if (vma->vm_flags & VM_EXEC)
-		flush_user_icache_range_asm(start, end);
-	flush_tlb_range(vma, start, end);
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		flush_tlb_range(vma, start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn)) {
+			if (unlikely(vma->vm_mm->context))
+				flush_tlb_page(vma, addr);
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
 }
 }
 
 
 void
 void
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
 {
-	BUG_ON(!vma->vm_mm->context);
-
 	if (pfn_valid(pfn)) {
 	if (pfn_valid(pfn)) {
-		flush_tlb_page(vma, vmaddr);
+		if (likely(vma->vm_mm->context))
+			flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 	}
 }
 }

+ 2 - 1
arch/powerpc/boot/Makefile

@@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \
 libfdt       := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
 libfdt       := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
 libfdtheader := fdt.h libfdt.h libfdt_internal.h
 libfdtheader := fdt.h libfdt.h libfdt_internal.h
 
 
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
+	treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
 	$(addprefix $(obj)/,$(libfdtheader))
 	$(addprefix $(obj)/,$(libfdtheader))
 
 
 src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
 src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \

+ 0 - 1
arch/powerpc/kernel/prom_init.c

@@ -874,7 +874,6 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
 		.mmu = 0,
 		.mmu = 0,
 		.hash_ext = 0,
 		.hash_ext = 0,
 		.radix_ext = 0,
 		.radix_ext = 0,
-		.byte22 = 0,
 	},
 	},
 
 
 	/* option vector 6: IBM PAPR hints */
 	/* option vector 6: IBM PAPR hints */

+ 43 - 26
arch/powerpc/kvm/book3s_64_mmu_radix.c

@@ -195,6 +195,12 @@ static void kvmppc_pte_free(pte_t *ptep)
 	kmem_cache_free(kvm_pte_cache, ptep);
 	kmem_cache_free(kvm_pte_cache, ptep);
 }
 }
 
 
+/* Like pmd_huge() and pmd_large(), but works regardless of config options */
+static inline int pmd_is_leaf(pmd_t pmd)
+{
+	return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
 static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 			     unsigned int level, unsigned long mmu_seq)
 			     unsigned int level, unsigned long mmu_seq)
 {
 {
@@ -219,7 +225,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 	else
 	else
 		new_pmd = pmd_alloc_one(kvm->mm, gpa);
 		new_pmd = pmd_alloc_one(kvm->mm, gpa);
 
 
-	if (level == 0 && !(pmd && pmd_present(*pmd)))
+	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
 		new_ptep = kvmppc_pte_alloc();
 		new_ptep = kvmppc_pte_alloc();
 
 
 	/* Check if we might have been invalidated; let the guest retry if so */
 	/* Check if we might have been invalidated; let the guest retry if so */
@@ -244,12 +250,30 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 		new_pmd = NULL;
 		new_pmd = NULL;
 	}
 	}
 	pmd = pmd_offset(pud, gpa);
 	pmd = pmd_offset(pud, gpa);
-	if (pmd_large(*pmd)) {
-		/* Someone else has instantiated a large page here; retry */
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
-	if (level == 1 && !pmd_none(*pmd)) {
+	if (pmd_is_leaf(*pmd)) {
+		unsigned long lgpa = gpa & PMD_MASK;
+
+		/*
+		 * If we raced with another CPU which has just put
+		 * a 2MB pte in after we saw a pte page, try again.
+		 */
+		if (level == 0 && !new_ptep) {
+			ret = -EAGAIN;
+			goto out_unlock;
+		}
+		/* Valid 2MB page here already, remove it */
+		old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
+					      ~0UL, 0, lgpa, PMD_SHIFT);
+		kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
+		if (old & _PAGE_DIRTY) {
+			unsigned long gfn = lgpa >> PAGE_SHIFT;
+			struct kvm_memory_slot *memslot;
+			memslot = gfn_to_memslot(kvm, gfn);
+			if (memslot && memslot->dirty_bitmap)
+				kvmppc_update_dirty_map(memslot,
+							gfn, PMD_SIZE);
+		}
+	} else if (level == 1 && !pmd_none(*pmd)) {
 		/*
 		/*
 		 * There's a page table page here, but we wanted
 		 * There's a page table page here, but we wanted
 		 * to install a large page.  Tell the caller and let
 		 * to install a large page.  Tell the caller and let
@@ -412,28 +436,24 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	} else {
 	} else {
 		page = pages[0];
 		page = pages[0];
 		pfn = page_to_pfn(page);
 		pfn = page_to_pfn(page);
-		if (PageHuge(page)) {
-			page = compound_head(page);
-			pte_size <<= compound_order(page);
+		if (PageCompound(page)) {
+			pte_size <<= compound_order(compound_head(page));
 			/* See if we can insert a 2MB large-page PTE here */
 			/* See if we can insert a 2MB large-page PTE here */
 			if (pte_size >= PMD_SIZE &&
 			if (pte_size >= PMD_SIZE &&
-			    (gpa & PMD_MASK & PAGE_MASK) ==
-			    (hva & PMD_MASK & PAGE_MASK)) {
+			    (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+			    (hva & (PMD_SIZE - PAGE_SIZE))) {
 				level = 1;
 				level = 1;
 				pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
 				pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
 			}
 			}
 		}
 		}
 		/* See if we can provide write access */
 		/* See if we can provide write access */
 		if (writing) {
 		if (writing) {
-			/*
-			 * We assume gup_fast has set dirty on the host PTE.
-			 */
 			pgflags |= _PAGE_WRITE;
 			pgflags |= _PAGE_WRITE;
 		} else {
 		} else {
 			local_irq_save(flags);
 			local_irq_save(flags);
 			ptep = find_current_mm_pte(current->mm->pgd,
 			ptep = find_current_mm_pte(current->mm->pgd,
 						   hva, NULL, NULL);
 						   hva, NULL, NULL);
-			if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
+			if (ptep && pte_write(*ptep))
 				pgflags |= _PAGE_WRITE;
 				pgflags |= _PAGE_WRITE;
 			local_irq_restore(flags);
 			local_irq_restore(flags);
 		}
 		}
@@ -459,18 +479,15 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte = pfn_pte(pfn, __pgprot(pgflags));
 		pte = pfn_pte(pfn, __pgprot(pgflags));
 		ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
 		ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
 	}
 	}
-	if (ret == 0 || ret == -EAGAIN)
-		ret = RESUME_GUEST;
 
 
 	if (page) {
 	if (page) {
-		/*
-		 * We drop pages[0] here, not page because page might
-		 * have been set to the head page of a compound, but
-		 * we have to drop the reference on the correct tail
-		 * page to match the get inside gup()
-		 */
-		put_page(pages[0]);
+		if (!ret && (pgflags & _PAGE_WRITE))
+			set_page_dirty_lock(page);
+		put_page(page);
 	}
 	}
+
+	if (ret == 0 || ret == -EAGAIN)
+		ret = RESUME_GUEST;
 	return ret;
 	return ret;
 }
 }
 
 
@@ -644,7 +661,7 @@ void kvmppc_free_radix(struct kvm *kvm)
 				continue;
 				continue;
 			pmd = pmd_offset(pud, 0);
 			pmd = pmd_offset(pud, 0);
 			for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
 			for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
-				if (pmd_huge(*pmd)) {
+				if (pmd_is_leaf(*pmd)) {
 					pmd_clear(pmd);
 					pmd_clear(pmd);
 					continue;
 					continue;
 				}
 				}

+ 9 - 8
arch/powerpc/kvm/book3s_hv.c

@@ -2885,7 +2885,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	trace_hardirqs_on();
 
-	guest_enter();
+	guest_enter_irqoff();
 
 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
@@ -2893,8 +2893,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
-	guest_exit();
-
 	trace_hardirqs_off();
 	set_irq_happened(trap);
 
@@ -2937,6 +2935,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	kvmppc_set_host_core(pcpu);
 
 	local_irq_enable();
+	guest_exit();
 
 	/* Let secondaries go back to the offline loop */
 	for (i = 0; i < controlled_threads; ++i) {
@@ -3656,15 +3655,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		goto up_out;
 
 	psize = vma_kernel_pagesize(vma);
-	porder = __ilog2(psize);
 
 	up_read(&current->mm->mmap_sem);
 
 	/* We can handle 4k, 64k or 16M pages in the VRMA */
-	err = -EINVAL;
-	if (!(psize == 0x1000 || psize == 0x10000 ||
-	      psize == 0x1000000))
-		goto out_srcu;
+	if (psize >= 0x1000000)
+		psize = 0x1000000;
+	else if (psize >= 0x10000)
+		psize = 0x10000;
+	else
+		psize = 0x1000;
+	porder = __ilog2(psize);
 
 	senc = slb_pgsize_encoding(psize);
 	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |

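Illustrative note (not part of the merge): the kvmppc_hv_setup_htab_rma() hunk above stops rejecting backing page sizes other than 4k/64k/16M and instead rounds the size down to the nearest size the VRMA supports. A minimal user-space sketch of that clamping, assuming the same three supported sizes:

	/* sketch only -- not kernel code */
	#include <stdio.h>

	static unsigned long clamp_vrma_psize(unsigned long psize)
	{
		if (psize >= 0x1000000)		/* 16M */
			return 0x1000000;
		if (psize >= 0x10000)		/* 64k */
			return 0x10000;
		return 0x1000;			/* 4k */
	}

	int main(void)
	{
		unsigned long sizes[] = { 0x1000, 0x200000, 0x1000000, 0x40000000 };

		/* a 2M THP-backed VMA now maps as 64k instead of failing with -EINVAL */
		for (int i = 0; i < 4; i++)
			printf("%#lx -> %#lx\n", sizes[i], clamp_vrma_psize(sizes[i]));
		return 0;
	}
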
+ 5 - 5
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -320,7 +320,6 @@ kvm_novcpu_exit:
 	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -1220,6 +1219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 secondary_too_late:
 	li	r12, 0
+	stw	r12, STACK_SLOT_TRAP(r1)
 	cmpdi	r4, 0
 	beq	11f
 	stw	r12, VCPU_TRAP(r4)
@@ -1558,12 +1558,12 @@ mc_cont:
 3:	stw	r5,VCPU_SLB_MAX(r9)
 
 guest_bypass:
+	stw	r12, STACK_SLOT_TRAP(r1)
 	mr 	r3, r12
 	/* Increment exit count, poke other threads to exit */
 	bl	kvmhv_commence_exit
 	nop
 	ld	r9, HSTATE_KVM_VCPU(r13)
-	lwz	r12, VCPU_TRAP(r9)
 
 	/* Stop others sending VCPU interrupts to this physical CPU */
 	li	r0, -1
@@ -1898,6 +1898,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
 	 * POWER7/POWER8 guest -> host partition switch code.
 	 * We don't have to lock against tlbies but we do
 	 * have to coordinate the hardware threads.
+	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
 	 */
kvmhv_switch_to_host:
 	/* Secondary threads wait for primary to do partition switch */
@@ -1950,12 +1951,12 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	/* If HMI, call kvmppc_realmode_hmi_handler() */
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	cmpwi	r12, BOOK3S_INTERRUPT_HMI
 	bne	27f
 	bl	kvmppc_realmode_hmi_handler
 	nop
 	cmpdi	r3, 0
-	li	r12, BOOK3S_INTERRUPT_HMI
 	/*
 	 * At this point kvmppc_realmode_hmi_handler may have resync-ed
 	 * the TB, and if it has, we must not subtract the guest timebase
@@ -2008,10 +2009,8 @@ BEGIN_FTR_SECTION
 	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
 	cmpwi	r8, 0
 	beq	47f
-	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_p9_restore_lpcr
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	48f
 47:
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -2049,6 +2048,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
+	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
 	ld	r0, SFS+PPC_LR_STKOFF(r1)
 	addi	r1, r1, SFS
 	mtlr	r0

+ 3 - 1
arch/powerpc/kvm/powerpc.c

@@ -1345,7 +1345,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		unsigned int rt, int is_default_endian)
 {
-	enum emulation_result emulated;
+	enum emulation_result emulated = EMULATE_DONE;
 
 	while (vcpu->arch.mmio_vmx_copy_nums) {
 		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
@@ -1608,7 +1608,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	kvm_sigset_deactivate(vcpu);
 
+#ifdef CONFIG_ALTIVEC
 out:
+#endif
 	vcpu_put(vcpu);
 	return r;
 }

+ 1 - 0
arch/s390/include/asm/mmu_context.h

@@ -63,6 +63,7 @@ static inline int init_new_context(struct task_struct *tsk,
 				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
 		/* pgd_alloc() did not account this pmd */
 		mm_inc_nr_pmds(mm);
+		mm_inc_nr_puds(mm);
 	}
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;

+ 6 - 4
arch/s390/kernel/entry.S

@@ -14,6 +14,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 #include <asm/ctl_reg.h>
+#include <asm/dwarf.h>
 #include <asm/errno.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
@@ -230,7 +231,7 @@ _PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 	.hidden \name
 	.type \name,@function
 \name:
-	.cfi_startproc
+	CFI_STARTPROC
 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
 	exrl	0,0f
 #else
@@ -239,7 +240,7 @@ _PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 #endif
 	j	.
 0:	br	\reg
-	.cfi_endproc
+	CFI_ENDPROC
 	.endm
 
 	GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
@@ -426,13 +427,13 @@ ENTRY(system_call)
 	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
 	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	stmg	%r0,%r7,__PT_R0(%r11)
-	# clear user controlled register to prevent speculative use
-	xgr	%r0,%r0
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg	%r14,__PT_FLAGS(%r11)
 .Lsysc_do_svc:
+	# clear user controlled register to prevent speculative use
+	xgr	%r0,%r0
 	# load address of system call table
 	lg	%r10,__THREAD_sysc_table(%r13,%r12)
 	llgh	%r8,__PT_INT_CODE+2(%r11)
@@ -1439,6 +1440,7 @@ cleanup_critical:
 	stg	%r15,__LC_SYSTEM_TIMER
 0:	# update accounting time stamp
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	# set up saved register r11
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r9,STACK_FRAME_OVERHEAD(%r15)

+ 2 - 2
arch/s390/kernel/nospec-branch.c

@@ -2,8 +2,8 @@
 #include <linux/module.h>
 #include <asm/nospec-branch.h>
 
-int nospec_call_disable = IS_ENABLED(EXPOLINE_OFF);
-int nospec_return_disable = !IS_ENABLED(EXPOLINE_FULL);
+int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL);
 
 static int __init nospectre_v2_setup_early(char *str)
 {

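Illustrative note (not part of the merge): the fix above matters because IS_ENABLED() quietly evaluates to 0 for any symbol that is not defined, so the missing CONFIG_ prefix was never a build error. A simplified stand-in for the kconfig macro (assumption: the real IS_ENABLED() additionally handles =m module symbols):

	/* sketch only -- compile with gcc; prints 1 then 0 */
	#include <stdio.h>

	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define IS_ENABLED(option) ___is_defined(option)

	#define CONFIG_EXPOLINE_OFF 1

	int main(void)
	{
		printf("%d\n", IS_ENABLED(CONFIG_EXPOLINE_OFF));	/* defined -> 1 */
		printf("%d\n", IS_ENABLED(EXPOLINE_OFF));		/* undefined -> 0, silently */
		return 0;
	}
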
+ 2 - 0
arch/s390/kvm/kvm-s390.c

@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+	{ "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
 	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
 	{ "instruction_gs", VCPU_STAT(instruction_gs) },
@@ -2146,6 +2147,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 		/* we still need the basic sca for the ipte control */
 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+		return;
 	}
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {

+ 13 - 6
arch/sparc/mm/tlb.c

@@ -163,13 +163,10 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 	pte_unmap(pte);
 }
 
-void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-		pmd_t *pmdp, pmd_t pmd)
-{
-	pmd_t orig = *pmdp;
-
-	*pmdp = pmd;
 
+static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
+			   pmd_t orig, pmd_t pmd)
+{
 	if (mm == &init_mm)
 		return;
 
@@ -219,6 +216,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	}
 }
 
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+	__set_pmd_acct(mm, addr, orig, pmd);
+}
+
 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
 {
@@ -227,6 +233,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 	do {
 		old = *pmdp;
 	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+	__set_pmd_acct(vma->vm_mm, address, old, pmd);
 
 	return old;
 }

+ 1 - 10
arch/x86/Kconfig

@@ -2307,7 +2307,7 @@ choice
 	  it can be used to assist security vulnerability exploitation.
 
 	  This setting can be changed at boot time via the kernel command
-	  line parameter vsyscall=[native|emulate|none].
+	  line parameter vsyscall=[emulate|none].
 
 	  On a system with recent enough glibc (2.14 or newer) and no
 	  static binaries, you can say None without a performance penalty
@@ -2315,15 +2315,6 @@ choice
 
 	  If unsure, select "Emulate".
 
-	config LEGACY_VSYSCALL_NATIVE
-		bool "Native"
-		help
-		  Actual executable code is located in the fixed vsyscall
-		  address mapping, implementing time() efficiently. Since
-		  this makes the mapping executable, it can be used during
-		  security vulnerability exploitation (traditionally as
-		  ROP gadgets). This configuration is not recommended.
-
 	config LEGACY_VSYSCALL_EMULATE
 		bool "Emulate"
 		help

+ 1 - 15
arch/x86/entry/entry_64_compat.S

@@ -363,9 +363,7 @@ ENTRY(entry_INT80_compat)
 	pushq	2*8(%rdi)		/* regs->ip */
 	pushq	1*8(%rdi)		/* regs->orig_ax */
 
-	movq	(%rdi), %rdi		/* restore %rdi */
-
-	pushq	%rdi			/* pt_regs->di */
+	pushq	(%rdi)			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
 	pushq	%rdx			/* pt_regs->dx */
 	pushq	%rcx			/* pt_regs->cx */
@@ -406,15 +404,3 @@ ENTRY(entry_INT80_compat)
 	TRACE_IRQS_ON
 	jmp	swapgs_restore_regs_and_return_to_usermode
 END(entry_INT80_compat)
-
-ENTRY(stub32_clone)
-	/*
-	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
-	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
-	 *
-	 * The native 64-bit kernel's sys_clone() implements the latter,
-	 * so we need to swap arguments here before calling it:
-	 */
-	xchg	%r8, %rcx
-	jmp	sys_clone
-ENDPROC(stub32_clone)

+ 19 - 19
arch/x86/entry/syscalls/syscall_32.tbl

@@ -8,12 +8,12 @@
 #
 #
 0	i386	restart_syscall		sys_restart_syscall
 0	i386	restart_syscall		sys_restart_syscall
 1	i386	exit			sys_exit
 1	i386	exit			sys_exit
-2	i386	fork			sys_fork			sys_fork
+2	i386	fork			sys_fork
 3	i386	read			sys_read
 3	i386	read			sys_read
 4	i386	write			sys_write
 4	i386	write			sys_write
 5	i386	open			sys_open			compat_sys_open
 5	i386	open			sys_open			compat_sys_open
 6	i386	close			sys_close
 6	i386	close			sys_close
-7	i386	waitpid			sys_waitpid			sys32_waitpid
+7	i386	waitpid			sys_waitpid			compat_sys_x86_waitpid
 8	i386	creat			sys_creat
 8	i386	creat			sys_creat
 9	i386	link			sys_link
 9	i386	link			sys_link
 10	i386	unlink			sys_unlink
 10	i386	unlink			sys_unlink
@@ -78,7 +78,7 @@
 69	i386	ssetmask		sys_ssetmask
 69	i386	ssetmask		sys_ssetmask
 70	i386	setreuid		sys_setreuid16
 70	i386	setreuid		sys_setreuid16
 71	i386	setregid		sys_setregid16
 71	i386	setregid		sys_setregid16
-72	i386	sigsuspend		sys_sigsuspend			sys_sigsuspend
+72	i386	sigsuspend		sys_sigsuspend
 73	i386	sigpending		sys_sigpending			compat_sys_sigpending
 73	i386	sigpending		sys_sigpending			compat_sys_sigpending
 74	i386	sethostname		sys_sethostname
 74	i386	sethostname		sys_sethostname
 75	i386	setrlimit		sys_setrlimit			compat_sys_setrlimit
 75	i386	setrlimit		sys_setrlimit			compat_sys_setrlimit
@@ -96,7 +96,7 @@
 87	i386	swapon			sys_swapon
 87	i386	swapon			sys_swapon
 88	i386	reboot			sys_reboot
 88	i386	reboot			sys_reboot
 89	i386	readdir			sys_old_readdir			compat_sys_old_readdir
 89	i386	readdir			sys_old_readdir			compat_sys_old_readdir
-90	i386	mmap			sys_old_mmap			sys32_mmap
+90	i386	mmap			sys_old_mmap			compat_sys_x86_mmap
 91	i386	munmap			sys_munmap
 91	i386	munmap			sys_munmap
 92	i386	truncate		sys_truncate			compat_sys_truncate
 92	i386	truncate		sys_truncate			compat_sys_truncate
 93	i386	ftruncate		sys_ftruncate			compat_sys_ftruncate
 93	i386	ftruncate		sys_ftruncate			compat_sys_ftruncate
@@ -126,7 +126,7 @@
 117	i386	ipc			sys_ipc				compat_sys_ipc
 117	i386	ipc			sys_ipc				compat_sys_ipc
 118	i386	fsync			sys_fsync
 118	i386	fsync			sys_fsync
 119	i386	sigreturn		sys_sigreturn			sys32_sigreturn
 119	i386	sigreturn		sys_sigreturn			sys32_sigreturn
-120	i386	clone			sys_clone			stub32_clone
+120	i386	clone			sys_clone			compat_sys_x86_clone
 121	i386	setdomainname		sys_setdomainname
 121	i386	setdomainname		sys_setdomainname
 122	i386	uname			sys_newuname
 122	i386	uname			sys_newuname
 123	i386	modify_ldt		sys_modify_ldt
 123	i386	modify_ldt		sys_modify_ldt
@@ -186,8 +186,8 @@
 177	i386	rt_sigtimedwait		sys_rt_sigtimedwait		compat_sys_rt_sigtimedwait
 177	i386	rt_sigtimedwait		sys_rt_sigtimedwait		compat_sys_rt_sigtimedwait
 178	i386	rt_sigqueueinfo		sys_rt_sigqueueinfo		compat_sys_rt_sigqueueinfo
 178	i386	rt_sigqueueinfo		sys_rt_sigqueueinfo		compat_sys_rt_sigqueueinfo
 179	i386	rt_sigsuspend		sys_rt_sigsuspend
 179	i386	rt_sigsuspend		sys_rt_sigsuspend
-180	i386	pread64			sys_pread64			sys32_pread
-181	i386	pwrite64		sys_pwrite64			sys32_pwrite
+180	i386	pread64			sys_pread64			compat_sys_x86_pread
+181	i386	pwrite64		sys_pwrite64			compat_sys_x86_pwrite
 182	i386	chown			sys_chown16
 182	i386	chown			sys_chown16
 183	i386	getcwd			sys_getcwd
 183	i386	getcwd			sys_getcwd
 184	i386	capget			sys_capget
 184	i386	capget			sys_capget
@@ -196,14 +196,14 @@
 187	i386	sendfile		sys_sendfile			compat_sys_sendfile
 187	i386	sendfile		sys_sendfile			compat_sys_sendfile
 188	i386	getpmsg
 188	i386	getpmsg
 189	i386	putpmsg
 189	i386	putpmsg
-190	i386	vfork			sys_vfork			sys_vfork
+190	i386	vfork			sys_vfork
 191	i386	ugetrlimit		sys_getrlimit			compat_sys_getrlimit
 191	i386	ugetrlimit		sys_getrlimit			compat_sys_getrlimit
 192	i386	mmap2			sys_mmap_pgoff
 192	i386	mmap2			sys_mmap_pgoff
-193	i386	truncate64		sys_truncate64			sys32_truncate64
-194	i386	ftruncate64		sys_ftruncate64			sys32_ftruncate64
-195	i386	stat64			sys_stat64			sys32_stat64
-196	i386	lstat64			sys_lstat64			sys32_lstat64
-197	i386	fstat64			sys_fstat64			sys32_fstat64
+193	i386	truncate64		sys_truncate64			compat_sys_x86_truncate64
+194	i386	ftruncate64		sys_ftruncate64			compat_sys_x86_ftruncate64
+195	i386	stat64			sys_stat64			compat_sys_x86_stat64
+196	i386	lstat64			sys_lstat64			compat_sys_x86_lstat64
+197	i386	fstat64			sys_fstat64			compat_sys_x86_fstat64
 198	i386	lchown32		sys_lchown
 198	i386	lchown32		sys_lchown
 199	i386	getuid32		sys_getuid
 199	i386	getuid32		sys_getuid
 200	i386	getgid32		sys_getgid
 200	i386	getgid32		sys_getgid
@@ -231,7 +231,7 @@
 # 222 is unused
 # 222 is unused
 # 223 is unused
 # 223 is unused
 224	i386	gettid			sys_gettid
 224	i386	gettid			sys_gettid
-225	i386	readahead		sys_readahead			sys32_readahead
+225	i386	readahead		sys_readahead			compat_sys_x86_readahead
 226	i386	setxattr		sys_setxattr
 226	i386	setxattr		sys_setxattr
 227	i386	lsetxattr		sys_lsetxattr
 227	i386	lsetxattr		sys_lsetxattr
 228	i386	fsetxattr		sys_fsetxattr
 228	i386	fsetxattr		sys_fsetxattr
@@ -256,7 +256,7 @@
 247	i386	io_getevents		sys_io_getevents		compat_sys_io_getevents
 247	i386	io_getevents		sys_io_getevents		compat_sys_io_getevents
 248	i386	io_submit		sys_io_submit			compat_sys_io_submit
 248	i386	io_submit		sys_io_submit			compat_sys_io_submit
 249	i386	io_cancel		sys_io_cancel
 249	i386	io_cancel		sys_io_cancel
-250	i386	fadvise64		sys_fadvise64			sys32_fadvise64
+250	i386	fadvise64		sys_fadvise64			compat_sys_x86_fadvise64
 # 251 is available for reuse (was briefly sys_set_zone_reclaim)
 # 251 is available for reuse (was briefly sys_set_zone_reclaim)
 252	i386	exit_group		sys_exit_group
 252	i386	exit_group		sys_exit_group
 253	i386	lookup_dcookie		sys_lookup_dcookie		compat_sys_lookup_dcookie
 253	i386	lookup_dcookie		sys_lookup_dcookie		compat_sys_lookup_dcookie
@@ -278,7 +278,7 @@
 269	i386	fstatfs64		sys_fstatfs64			compat_sys_fstatfs64
 269	i386	fstatfs64		sys_fstatfs64			compat_sys_fstatfs64
 270	i386	tgkill			sys_tgkill
 270	i386	tgkill			sys_tgkill
 271	i386	utimes			sys_utimes			compat_sys_utimes
 271	i386	utimes			sys_utimes			compat_sys_utimes
-272	i386	fadvise64_64		sys_fadvise64_64		sys32_fadvise64_64
+272	i386	fadvise64_64		sys_fadvise64_64		compat_sys_x86_fadvise64_64
 273	i386	vserver
 273	i386	vserver
 274	i386	mbind			sys_mbind
 274	i386	mbind			sys_mbind
 275	i386	get_mempolicy		sys_get_mempolicy		compat_sys_get_mempolicy
 275	i386	get_mempolicy		sys_get_mempolicy		compat_sys_get_mempolicy
@@ -306,7 +306,7 @@
 297	i386	mknodat			sys_mknodat
 297	i386	mknodat			sys_mknodat
 298	i386	fchownat		sys_fchownat
 298	i386	fchownat		sys_fchownat
 299	i386	futimesat		sys_futimesat			compat_sys_futimesat
 299	i386	futimesat		sys_futimesat			compat_sys_futimesat
-300	i386	fstatat64		sys_fstatat64			sys32_fstatat
+300	i386	fstatat64		sys_fstatat64			compat_sys_x86_fstatat
 301	i386	unlinkat		sys_unlinkat
 301	i386	unlinkat		sys_unlinkat
 302	i386	renameat		sys_renameat
 302	i386	renameat		sys_renameat
 303	i386	linkat			sys_linkat
 303	i386	linkat			sys_linkat
@@ -320,7 +320,7 @@
 311	i386	set_robust_list		sys_set_robust_list		compat_sys_set_robust_list
 311	i386	set_robust_list		sys_set_robust_list		compat_sys_set_robust_list
 312	i386	get_robust_list		sys_get_robust_list		compat_sys_get_robust_list
 312	i386	get_robust_list		sys_get_robust_list		compat_sys_get_robust_list
 313	i386	splice			sys_splice
 313	i386	splice			sys_splice
-314	i386	sync_file_range		sys_sync_file_range		sys32_sync_file_range
+314	i386	sync_file_range		sys_sync_file_range		compat_sys_x86_sync_file_range
 315	i386	tee			sys_tee
 315	i386	tee			sys_tee
 316	i386	vmsplice		sys_vmsplice			compat_sys_vmsplice
 316	i386	vmsplice		sys_vmsplice			compat_sys_vmsplice
 317	i386	move_pages		sys_move_pages			compat_sys_move_pages
 317	i386	move_pages		sys_move_pages			compat_sys_move_pages
@@ -330,7 +330,7 @@
 321	i386	signalfd		sys_signalfd			compat_sys_signalfd
 321	i386	signalfd		sys_signalfd			compat_sys_signalfd
 322	i386	timerfd_create		sys_timerfd_create
 322	i386	timerfd_create		sys_timerfd_create
 323	i386	eventfd			sys_eventfd
 323	i386	eventfd			sys_eventfd
-324	i386	fallocate		sys_fallocate			sys32_fallocate
+324	i386	fallocate		sys_fallocate			compat_sys_x86_fallocate
 325	i386	timerfd_settime		sys_timerfd_settime		compat_sys_timerfd_settime
 325	i386	timerfd_settime		sys_timerfd_settime		compat_sys_timerfd_settime
 326	i386	timerfd_gettime		sys_timerfd_gettime		compat_sys_timerfd_gettime
 326	i386	timerfd_gettime		sys_timerfd_gettime		compat_sys_timerfd_gettime
 327	i386	signalfd4		sys_signalfd4			compat_sys_signalfd4
 327	i386	signalfd4		sys_signalfd4			compat_sys_signalfd4

+ 3 - 13
arch/x86/entry/vsyscall/vsyscall_64.c

@@ -42,10 +42,8 @@
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
-#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
-	NATIVE;
-#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
+static enum { EMULATE, NONE } vsyscall_mode =
+#ifdef CONFIG_LEGACY_VSYSCALL_NONE
 	NONE;
 #else
 	EMULATE;
@@ -56,8 +54,6 @@ static int __init vsyscall_setup(char *str)
 	if (str) {
 		if (!strcmp("emulate", str))
 			vsyscall_mode = EMULATE;
-		else if (!strcmp("native", str))
-			vsyscall_mode = NATIVE;
 		else if (!strcmp("none", str))
 			vsyscall_mode = NONE;
 		else
@@ -139,10 +135,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
 	WARN_ON_ONCE(address != regs->ip);
 
-	/* This should be unreachable in NATIVE mode. */
-	if (WARN_ON(vsyscall_mode == NATIVE))
-		return false;
-
 	if (vsyscall_mode == NONE) {
 		warn_bad_vsyscall(KERN_INFO, regs,
 				  "vsyscall attempted with vsyscall=none");
@@ -370,9 +362,7 @@ void __init map_vsyscall(void)
 
 	if (vsyscall_mode != NONE) {
 		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-			     vsyscall_mode == NATIVE
-			     ? PAGE_KERNEL_VSYSCALL
-			     : PAGE_KERNEL_VVAR);
+			     PAGE_KERNEL_VVAR);
 		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
 	}
 

+ 1 - 1
arch/x86/events/intel/uncore_snbep.c

@@ -3606,7 +3606,7 @@ static struct intel_uncore_type skx_uncore_imc = {
 };
 
 static struct attribute *skx_upi_uncore_formats_attr[] = {
-	&format_attr_event_ext.attr,
+	&format_attr_event.attr,
 	&format_attr_umask_ext.attr,
 	&format_attr_edge.attr,
 	&format_attr_inv.attr,

+ 44 - 30
arch/x86/ia32/sys_ia32.c

@@ -51,15 +51,14 @@
 #define AA(__x)		((unsigned long)(__x))
 #define AA(__x)		((unsigned long)(__x))
 
 
 
 
-asmlinkage long sys32_truncate64(const char __user *filename,
-				 unsigned long offset_low,
-				 unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 {
        return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
        return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
 }
 }
 
 
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
-				  unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 {
        return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
        return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
 }
 }
@@ -96,8 +95,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
 	return 0;
 	return 0;
 }
 }
 
 
-asmlinkage long sys32_stat64(const char __user *filename,
-			     struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 {
 	struct kstat stat;
 	struct kstat stat;
 	int ret = vfs_stat(filename, &stat);
 	int ret = vfs_stat(filename, &stat);
@@ -107,8 +106,8 @@ asmlinkage long sys32_stat64(const char __user *filename,
 	return ret;
 	return ret;
 }
 }
 
 
-asmlinkage long sys32_lstat64(const char __user *filename,
-			      struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 {
 	struct kstat stat;
 	struct kstat stat;
 	int ret = vfs_lstat(filename, &stat);
 	int ret = vfs_lstat(filename, &stat);
@@ -117,7 +116,8 @@ asmlinkage long sys32_lstat64(const char __user *filename,
 	return ret;
 	return ret;
 }
 }
 
 
-asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
+		       struct stat64 __user *, statbuf)
 {
 {
 	struct kstat stat;
 	struct kstat stat;
 	int ret = vfs_fstat(fd, &stat);
 	int ret = vfs_fstat(fd, &stat);
@@ -126,8 +126,9 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
 	return ret;
 	return ret;
 }
 }
 
 
-asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
-			      struct stat64 __user *statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd,
+		       const char __user *, filename,
+		       struct stat64 __user *, statbuf, int, flag)
 {
 {
 	struct kstat stat;
 	struct kstat stat;
 	int error;
 	int error;
@@ -153,7 +154,7 @@ struct mmap_arg_struct32 {
 	unsigned int offset;
 	unsigned int offset;
 };
 };
 
 
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
+COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
 {
 {
 	struct mmap_arg_struct32 a;
 	struct mmap_arg_struct32 a;
 
 
@@ -167,22 +168,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
 			       a.offset>>PAGE_SHIFT);
 			       a.offset>>PAGE_SHIFT);
 }
 }
 
 
-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
-			      int options)
+COMPAT_SYSCALL_DEFINE3(x86_waitpid, compat_pid_t, pid, unsigned int __user *,
+		       stat_addr, int, options)
 {
 {
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
 }
 }
 
 
 /* warning: next two assume little endian */
 /* warning: next two assume little endian */
-asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
-			    u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 {
 	return sys_pread64(fd, ubuf, count,
 	return sys_pread64(fd, ubuf, count,
 			 ((loff_t)AA(poshi) << 32) | AA(poslo));
 			 ((loff_t)AA(poshi) << 32) | AA(poslo));
 }
 }
 
 
-asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
-			     u32 count, u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 {
 	return sys_pwrite64(fd, ubuf, count,
 	return sys_pwrite64(fd, ubuf, count,
 			  ((loff_t)AA(poshi) << 32) | AA(poslo));
 			  ((loff_t)AA(poshi) << 32) | AA(poslo));
@@ -193,8 +194,9 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
  * Some system calls that need sign extended arguments. This could be
  * Some system calls that need sign extended arguments. This could be
  * done by a generic wrapper.
  * done by a generic wrapper.
  */
  */
-long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
-			__u32 len_low, __u32 len_high, int advice)
+COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low,
+		       __u32, offset_high, __u32, len_low, __u32, len_high,
+		       int, advice)
 {
 {
 	return sys_fadvise64_64(fd,
 	return sys_fadvise64_64(fd,
 			       (((u64)offset_high)<<32) | offset_low,
 			       (((u64)offset_high)<<32) | offset_low,
@@ -202,31 +204,43 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
 				advice);
 				advice);
 }
 }
 
 
-asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
-				   size_t count)
+COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
+		       unsigned int, off_hi, size_t, count)
 {
 {
 	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
 	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
 }
 }
 
 
-asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
-				      unsigned n_low, unsigned n_hi,  int flags)
+COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low,
+		       unsigned int, off_hi, unsigned int, n_low,
+		       unsigned int, n_hi, int, flags)
 {
 {
 	return sys_sync_file_range(fd,
 	return sys_sync_file_range(fd,
 				   ((u64)off_hi << 32) | off_low,
 				   ((u64)off_hi << 32) | off_low,
 				   ((u64)n_hi << 32) | n_low, flags);
 				   ((u64)n_hi << 32) | n_low, flags);
 }
 }
 
 
-asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
-				size_t len, int advice)
+COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
+		       unsigned int, offset_hi, size_t, len, int, advice)
 {
 {
 	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
 	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
 				len, advice);
 				len, advice);
 }
 }
 
 
-asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
-				unsigned offset_hi, unsigned len_lo,
-				unsigned len_hi)
+COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
+		       unsigned int, offset_lo, unsigned int, offset_hi,
+		       unsigned int, len_lo, unsigned int, len_hi)
 {
 {
 	return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
 	return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
 			     ((u64)len_hi << 32) | len_lo);
 			     ((u64)len_hi << 32) | len_lo);
 }
 }
+
+/*
+ * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS
+ */
+COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
+		       unsigned long, newsp, int __user *, parent_tidptr,
+		       unsigned long, tls_val, int __user *, child_tidptr)
+{
+	return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr,
+			tls_val);
+}

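Illustrative note (not part of the merge): every compat wrapper above rebuilds a 64-bit argument from the two 32-bit halves a 32-bit caller passes in separate registers. A standalone sketch of that pattern:

	/* sketch only -- not kernel code */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t join_hi_lo(uint32_t hi, uint32_t lo)
	{
		return ((uint64_t)hi << 32) | lo;	/* same shift-and-or as the wrappers above */
	}

	int main(void)
	{
		/* a 64-bit offset 0x123456789 arrives as hi=0x1, lo=0x23456789 */
		printf("%#llx\n", (unsigned long long)join_hi_lo(0x1, 0x23456789));
		return 0;
	}
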
+ 2 - 0
arch/x86/include/asm/cpufeatures.h

@@ -316,6 +316,7 @@
 #define X86_FEATURE_VPCLMULQDQ		(16*32+10) /* Carry-Less Multiplication Double Quadword */
 #define X86_FEATURE_AVX512_VNNI		(16*32+11) /* Vector Neural Network Instructions */
 #define X86_FEATURE_AVX512_BITALG	(16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_TME			(16*32+13) /* Intel Total Memory Encryption */
 #define X86_FEATURE_AVX512_VPOPCNTDQ	(16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57		(16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID		(16*32+22) /* RDPID instruction */
@@ -328,6 +329,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */

+ 1 - 0
arch/x86/include/asm/microcode.h

@@ -39,6 +39,7 @@ struct device;
 
 enum ucode_state {
 	UCODE_OK	= 0,
+	UCODE_NEW,
 	UCODE_UPDATED,
 	UCODE_NFOUND,
 	UCODE_ERROR,

+ 4 - 1
arch/x86/include/asm/nospec-branch.h

@@ -183,7 +183,10 @@
  * otherwise we'll run out of registers. We don't care about CET
  * here, anyway.
  */
-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",	\
+# define CALL_NOSPEC						\
+	ALTERNATIVE(						\
+	ANNOTATE_RETPOLINE_SAFE					\
+	"call *%[thunk_target]\n",				\
 	"       jmp    904f;\n"					\
 	"       .align 16\n"					\
 	"901:	call   903f;\n"					\

+ 0 - 2
arch/x86/include/asm/pgtable_types.h

@@ -174,7 +174,6 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
-#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -206,7 +205,6 @@ enum page_cache_mode {
 #define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_VSYSCALL	__pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
 #define PAGE_KERNEL_VVAR	__pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
 
 #define PAGE_KERNEL_IO		__pgprot(__PAGE_KERNEL_IO)

+ 1 - 0
arch/x86/include/asm/sections.h

@@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];
 
 #if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
+extern char __entry_trampoline_start[], __entry_trampoline_end[];
 #endif
 
 #endif	/* _ASM_X86_SECTIONS_H */

+ 30 - 18
arch/x86/include/asm/sys_ia32.h

@@ -20,31 +20,43 @@
 #include <asm/ia32.h>
 #include <asm/ia32.h>
 
 
 /* ia32/sys_ia32.c */
 /* ia32/sys_ia32.c */
-asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
-asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
+asmlinkage long compat_sys_x86_truncate64(const char __user *, unsigned long,
+					  unsigned long);
+asmlinkage long compat_sys_x86_ftruncate64(unsigned int, unsigned long,
+					   unsigned long);
 
 
-asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *);
-asmlinkage long sys32_fstatat(unsigned int, const char __user *,
+asmlinkage long compat_sys_x86_stat64(const char __user *,
+				      struct stat64 __user *);
+asmlinkage long compat_sys_x86_lstat64(const char __user *,
+				       struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstat64(unsigned int, struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstatat(unsigned int, const char __user *,
 			      struct stat64 __user *, int);
 			      struct stat64 __user *, int);
 struct mmap_arg_struct32;
 struct mmap_arg_struct32;
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
+asmlinkage long compat_sys_x86_mmap(struct mmap_arg_struct32 __user *);
 
 
-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+asmlinkage long compat_sys_x86_waitpid(compat_pid_t, unsigned int __user *,
+				       int);
 
 
-asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
-asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
+asmlinkage long compat_sys_x86_pread(unsigned int, char __user *, u32, u32,
+				     u32);
+asmlinkage long compat_sys_x86_pwrite(unsigned int, const char __user *, u32,
+				      u32, u32);
 
 
-long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
-long sys32_vm86_warning(void);
+asmlinkage long compat_sys_x86_fadvise64_64(int, __u32, __u32, __u32, __u32,
+					    int);
 
 
-asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
-asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
-				      unsigned, unsigned, int);
-asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
-asmlinkage long sys32_fallocate(int, int, unsigned,
-				unsigned, unsigned, unsigned);
+asmlinkage ssize_t compat_sys_x86_readahead(int, unsigned int, unsigned int,
+					    size_t);
+asmlinkage long compat_sys_x86_sync_file_range(int, unsigned int, unsigned int,
+					       unsigned int, unsigned int,
+					       int);
+asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int,
+					 size_t, int);
+asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int,
+					 unsigned int, unsigned int);
+asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *,
+				     unsigned long, int __user *);
 
 
 /* ia32/ia32_signal.c */
 /* ia32/ia32_signal.c */
 asmlinkage long sys32_sigreturn(void);
 asmlinkage long sys32_sigreturn(void);

+ 1 - 0
arch/x86/include/asm/vmx.h

@@ -352,6 +352,7 @@ enum vmcs_field {
 #define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
+#define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */

+ 1 - 0
arch/x86/include/uapi/asm/mce.h

@@ -30,6 +30,7 @@ struct mce {
 	__u64 synd;	/* MCA_SYND MSR: only valid on SMCA systems */
 	__u64 ipid;	/* MCA_IPID MSR: only valid on SMCA systems */
 	__u64 ppin;	/* Protected Processor Inventory Number */
+	__u32 microcode;/* Microcode revision */
 };
 
 #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)

+ 8 - 2
arch/x86/kernel/cpu/intel.c

@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
 /*
 /*
  * Early microcode releases for the Spectre v2 mitigation were broken.
  * Early microcode releases for the Spectre v2 mitigation were broken.
  * Information taken from;
  * Information taken from;
- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
  * - https://kb.vmware.com/s/article/52345
  * - https://kb.vmware.com/s/article/52345
  * - Microcode revisions observed in the wild
  * - Microcode revisions observed in the wild
  * - Release note from 20180108 microcode release
  * - Release note from 20180108 microcode release
@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
 	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
 	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
 	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
 	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
 	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
 	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
-	{ INTEL_FAM6_SKYLAKE_DESKTOP,	0x03,	0xc2 },
 	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
 	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
 	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
 	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
 	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
 	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
@@ -144,6 +143,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 {
 {
 	int i;
 	int i;
 
 
+	/*
+	 * We know that the hypervisor lie to us on the microcode version so
+	 * we may as well hope that it is running the correct version.
+	 */
+	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+		return false;
+
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)

+ 24 - 2
arch/x86/kernel/cpu/mcheck/mce.c

@@ -56,6 +56,9 @@
 
 
 static DEFINE_MUTEX(mce_log_mutex);
 static DEFINE_MUTEX(mce_log_mutex);
 
 
+/* sysfs synchronization */
+static DEFINE_MUTEX(mce_sysfs_mutex);
+
 #define CREATE_TRACE_POINTS
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 #include <trace/events/mce.h>
 
 
@@ -130,6 +133,8 @@ void mce_setup(struct mce *m)
 
 
 	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
 	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
 		rdmsrl(MSR_PPIN, m->ppin);
 		rdmsrl(MSR_PPIN, m->ppin);
+
+	m->microcode = boot_cpu_data.microcode;
 }
 }
 
 
 DEFINE_PER_CPU(struct mce, injectm);
 DEFINE_PER_CPU(struct mce, injectm);
@@ -262,7 +267,7 @@ static void __print_mce(struct mce *m)
 	 */
 	 */
 	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
 		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
-		cpu_data(m->extcpu).microcode);
+		m->microcode);
 }
 }
 
 
 static void print_mce(struct mce *m)
 static void print_mce(struct mce *m)
@@ -2086,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s,
 	if (kstrtou64(buf, 0, &new) < 0)
 	if (kstrtou64(buf, 0, &new) < 0)
 		return -EINVAL;
 		return -EINVAL;
 
 
+	mutex_lock(&mce_sysfs_mutex);
 	if (mca_cfg.ignore_ce ^ !!new) {
 	if (mca_cfg.ignore_ce ^ !!new) {
 		if (new) {
 		if (new) {
 			/* disable ce features */
 			/* disable ce features */
@@ -2098,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s,
 			on_each_cpu(mce_enable_ce, (void *)1, 1);
 			on_each_cpu(mce_enable_ce, (void *)1, 1);
 		}
 		}
 	}
 	}
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return size;
 	return size;
 }
 }
 
 
@@ -2110,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s,
 	if (kstrtou64(buf, 0, &new) < 0)
 	if (kstrtou64(buf, 0, &new) < 0)
 		return -EINVAL;
 		return -EINVAL;
 
 
+	mutex_lock(&mce_sysfs_mutex);
 	if (mca_cfg.cmci_disabled ^ !!new) {
 	if (mca_cfg.cmci_disabled ^ !!new) {
 		if (new) {
 		if (new) {
 			/* disable cmci */
 			/* disable cmci */
@@ -2121,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s,
 			on_each_cpu(mce_enable_ce, NULL, 1);
 			on_each_cpu(mce_enable_ce, NULL, 1);
 		}
 		}
 	}
 	}
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return size;
 	return size;
 }
 }
 
 
@@ -2128,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s,
 				      struct device_attribute *attr,
 				      struct device_attribute *attr,
 				      const char *buf, size_t size)
 				      const char *buf, size_t size)
 {
 {
-	ssize_t ret = device_store_int(s, attr, buf, size);
+	unsigned long old_check_interval = check_interval;
+	ssize_t ret = device_store_ulong(s, attr, buf, size);
+
+	if (check_interval == old_check_interval)
+		return ret;
+
+	if (check_interval < 1)
+		check_interval = 1;
+
+	mutex_lock(&mce_sysfs_mutex);
 	mce_restart();
 	mce_restart();
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return ret;
 	return ret;
 }
 }
 
 

+ 21 - 13
arch/x86/kernel/cpu/microcode/amd.c

@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
 	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
-	if (ret != UCODE_OK)
+	if (ret > UCODE_UPDATED)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	return 0;
 	return 0;
@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 static enum ucode_state
 static enum ucode_state
 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
 {
+	struct ucode_patch *p;
 	enum ucode_state ret;
 	enum ucode_state ret;
 
 
 	/* free old equiv table */
 	/* free old equiv table */
 	free_equiv_cpu_table();
 	free_equiv_cpu_table();
 
 
 	ret = __load_microcode_amd(family, data, size);
 	ret = __load_microcode_amd(family, data, size);
-
-	if (ret != UCODE_OK)
+	if (ret != UCODE_OK) {
 		cleanup();
 		cleanup();
+		return ret;
+	}
 
 
-#ifdef CONFIG_X86_32
-	/* save BSP's matching patch for early load */
-	if (save) {
-		struct ucode_patch *p = find_patch(0);
-		if (p) {
-			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
-			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
-							       PATCH_MAX_SIZE));
-		}
+	p = find_patch(0);
+	if (!p) {
+		return ret;
+	} else {
+		if (boot_cpu_data.microcode == p->patch_id)
+			return ret;
+
+		ret = UCODE_NEW;
 	}
 	}
-#endif
+
+	/* save BSP's matching patch for early load */
+	if (!save)
+		return ret;
+
+	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+	memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
+
 	return ret;
 	return ret;
 }
 }
 
 

+ 135 - 43
arch/x86/kernel/cpu/microcode/core.c

@@ -22,13 +22,16 @@
 #define pr_fmt(fmt) "microcode: " fmt
 #define pr_fmt(fmt) "microcode: " fmt
 
 
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>
+#include <linux/stop_machine.h>
 #include <linux/syscore_ops.h>
 #include <linux/syscore_ops.h>
 #include <linux/miscdevice.h>
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
 #include <linux/capability.h>
 #include <linux/firmware.h>
 #include <linux/firmware.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
+#include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
 #include <linux/cpu.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/mm.h>
 
 
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
  */
  */
 static DEFINE_MUTEX(microcode_mutex);
 static DEFINE_MUTEX(microcode_mutex);
 
 
+/*
+ * Serialize late loading so that CPUs get updated one-by-one.
+ */
+static DEFINE_SPINLOCK(update_lock);
+
 struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
 struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
 
 
 struct cpu_info_ctx {
 struct cpu_info_ctx {
@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
 	return ret;
 	return ret;
 }
 }
 
 
-struct apply_microcode_ctx {
-	enum ucode_state err;
-};
-
 static void apply_microcode_local(void *arg)
 static void apply_microcode_local(void *arg)
 {
 {
-	struct apply_microcode_ctx *ctx = arg;
+	enum ucode_state *err = arg;
 
 
-	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
+	*err = microcode_ops->apply_microcode(smp_processor_id());
 }
 }
 
 
 static int apply_microcode_on_target(int cpu)
 static int apply_microcode_on_target(int cpu)
 {
 {
-	struct apply_microcode_ctx ctx = { .err = 0 };
+	enum ucode_state err;
 	int ret;
 	int ret;
 
 
-	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
-	if (!ret)
-		ret = ctx.err;
-
+	ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
+	if (!ret) {
+		if (err == UCODE_ERROR)
+			ret = 1;
+	}
 	return ret;
 	return ret;
 }
 }
 
 
@@ -489,19 +494,114 @@ static void __exit microcode_dev_exit(void)
 /* fake device for request_firmware */
 /* fake device for request_firmware */
 static struct platform_device	*microcode_pdev;
 static struct platform_device	*microcode_pdev;
 
 
-static enum ucode_state reload_for_cpu(int cpu)
+/*
+ * Late loading dance. Why the heavy-handed stomp_machine effort?
+ *
+ * - HT siblings must be idle and not execute other code while the other sibling
+ *   is loading microcode in order to avoid any negative interactions caused by
+ *   the loading.
+ *
+ * - In addition, microcode update on the cores must be serialized until this
+ *   requirement can be relaxed in the future. Right now, this is conservative
+ *   and good.
+ */
+#define SPINUNIT 100 /* 100 nsec */
+
+static int check_online_cpus(void)
 {
 {
-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-	enum ucode_state ustate;
+	if (num_online_cpus() == num_present_cpus())
+		return 0;
 
 
-	if (!uci->valid)
-		return UCODE_OK;
+	pr_err("Not all CPUs online, aborting microcode update.\n");
+
+	return -EINVAL;
+}
+
+static atomic_t late_cpus_in;
+static atomic_t late_cpus_out;
+
+static int __wait_for_cpus(atomic_t *t, long long timeout)
+{
+	int all_cpus = num_online_cpus();
+
+	atomic_inc(t);
 
 
-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
-	if (ustate != UCODE_OK)
-		return ustate;
+	while (atomic_read(t) < all_cpus) {
+		if (timeout < SPINUNIT) {
+			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+				all_cpus - atomic_read(t));
+			return 1;
+		}
 
 
-	return apply_microcode_on_target(cpu);
+		ndelay(SPINUNIT);
+		timeout -= SPINUNIT;
+
+		touch_nmi_watchdog();
+	}
+	return 0;
+}
+
+/*
+ * Returns:
+ * < 0 - on error
+ *   0 - no update done
+ *   1 - microcode was updated
+ */
+static int __reload_late(void *info)
+{
+	int cpu = smp_processor_id();
+	enum ucode_state err;
+	int ret = 0;
+
+	/*
+	 * Wait for all CPUs to arrive. A load will not be attempted unless all
+	 * CPUs show up.
+	 * */
+	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
+		return -1;
+
+	spin_lock(&update_lock);
+	apply_microcode_local(&err);
+	spin_unlock(&update_lock);
+
+	if (err > UCODE_NFOUND) {
+		pr_warn("Error reloading microcode on CPU %d\n", cpu);
+		return -1;
+	/* siblings return UCODE_OK because their engine got updated already */
+	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
+		ret = 1;
+	} else {
+		return ret;
+	}
+
+	/*
+	 * Increase the wait timeout to a safe value here since we're
+	 * serializing the microcode update and that could take a while on a
+	 * large number of CPUs. And that is fine as the *actual* timeout will
+	 * be determined by the last CPU finished updating and thus cut short.
+	 */
+	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
+		panic("Timeout during microcode update!\n");
+
+	return ret;
+}
+
+/*
+ * Reload microcode late on all CPUs. Wait for a sec until they
+ * all gather together.
+ */
+static int microcode_reload_late(void)
+{
+	int ret;
+
+	atomic_set(&late_cpus_in,  0);
+	atomic_set(&late_cpus_out, 0);
+
+	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+	if (ret > 0)
+		microcode_check();
+
+	return ret;
 }
 }
 
 
 static ssize_t reload_store(struct device *dev,
 static ssize_t reload_store(struct device *dev,
@@ -509,10 +609,9 @@ static ssize_t reload_store(struct device *dev,
 			    const char *buf, size_t size)
 			    const char *buf, size_t size)
 {
 {
 	enum ucode_state tmp_ret = UCODE_OK;
 	enum ucode_state tmp_ret = UCODE_OK;
-	bool do_callback = false;
+	int bsp = boot_cpu_data.cpu_index;
 	unsigned long val;
 	unsigned long val;
 	ssize_t ret = 0;
 	ssize_t ret = 0;
-	int cpu;
 
 
 	ret = kstrtoul(buf, 0, &val);
 	ret = kstrtoul(buf, 0, &val);
 	if (ret)
 	if (ret)
@@ -521,29 +620,24 @@ static ssize_t reload_store(struct device *dev,
 	if (val != 1)
 	if (val != 1)
 		return size;
 		return size;
 
 
-	get_online_cpus();
-	mutex_lock(&microcode_mutex);
-	for_each_online_cpu(cpu) {
-		tmp_ret = reload_for_cpu(cpu);
-		if (tmp_ret > UCODE_NFOUND) {
-			pr_warn("Error reloading microcode on CPU %d\n", cpu);
-
-			/* set retval for the first encountered reload error */
-			if (!ret)
-				ret = -EINVAL;
-		}
+	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+	if (tmp_ret != UCODE_NEW)
+		return size;
 
 
-		if (tmp_ret == UCODE_UPDATED)
-			do_callback = true;
-	}
+	get_online_cpus();
 
 
-	if (!ret && do_callback)
-		microcode_check();
+	ret = check_online_cpus();
+	if (ret)
+		goto put;
 
 
+	mutex_lock(&microcode_mutex);
+	ret = microcode_reload_late();
 	mutex_unlock(&microcode_mutex);
 	mutex_unlock(&microcode_mutex);
+
+put:
 	put_online_cpus();
 	put_online_cpus();
 
 
-	if (!ret)
+	if (ret >= 0)
 		ret = size;
 		ret = size;
 
 
 	return ret;
 	return ret;
@@ -611,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
 	if (system_state != SYSTEM_RUNNING)
 	if (system_state != SYSTEM_RUNNING)
 		return UCODE_NFOUND;
 		return UCODE_NFOUND;
 
 
-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
-						     refresh_fw);
-
-	if (ustate == UCODE_OK) {
+	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
+	if (ustate == UCODE_NEW) {
 		pr_debug("CPU%d updated upon init\n", cpu);
 		pr_debug("CPU%d updated upon init\n", cpu);
 		apply_microcode_on_target(cpu);
 		apply_microcode_on_target(cpu);
 	}
 	}
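
The __wait_for_cpus()/__reload_late() machinery added above is essentially a
counted rendezvous: every CPU accounts for itself in a shared atomic counter
and then spins until the counter reaches the number of online CPUs or a
timeout expires.  A minimal user-space sketch of that pattern, using plain
C11 atomics -- the names and the poll interval are illustrative, this is not
the kernel code:

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define SPIN_NS 100			/* poll interval, like SPINUNIT above */

static int wait_for_all(atomic_int *arrived, int total, long long timeout_ns)
{
	while (atomic_load(arrived) < total) {
		if (timeout_ns < SPIN_NS) {
			fprintf(stderr, "rendezvous timed out, %d missing\n",
				total - atomic_load(arrived));
			return 1;
		}
		nanosleep(&(struct timespec){ .tv_nsec = SPIN_NS }, NULL);
		timeout_ns -= SPIN_NS;
	}
	return 0;
}

Each participant would atomic_fetch_add(arrived, 1) before calling
wait_for_all(), so the last thread to arrive releases everyone at once.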

+ 43 - 9
arch/x86/kernel/cpu/microcode/intel.c

@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 	if (!mc)
 	if (!mc)
 		return 0;
 		return 0;
 
 
+	/*
+	 * Save us the MSR write below - which is a particular expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();
+
 	/* write microcode via MSR 0x79 */
 	/* write microcode via MSR 0x79 */
 	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 
 
@@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 
 
 static enum ucode_state apply_microcode_intel(int cpu)
 static enum ucode_state apply_microcode_intel(int cpu)
 {
 {
+	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct microcode_intel *mc;
 	struct microcode_intel *mc;
-	struct ucode_cpu_info *uci;
-	struct cpuinfo_x86 *c;
 	static int prev_rev;
 	static int prev_rev;
 	u32 rev;
 	u32 rev;
 
 
@@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
 	if (WARN_ON(raw_smp_processor_id() != cpu))
 	if (WARN_ON(raw_smp_processor_id() != cpu))
 		return UCODE_ERROR;
 		return UCODE_ERROR;
 
 
-	uci = ucode_cpu_info + cpu;
-	mc = uci->mc;
+	/* Look for a newer patch in our cache: */
+	mc = find_patch(uci);
 	if (!mc) {
 	if (!mc) {
-		/* Look for a newer patch in our cache: */
-		mc = find_patch(uci);
+		mc = uci->mc;
 		if (!mc)
 		if (!mc)
 			return UCODE_NFOUND;
 			return UCODE_NFOUND;
 	}
 	}
 
 
+	/*
+	 * Save us the MSR write below - which is a particular expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		c->microcode = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();
+
 	/* write microcode via MSR 0x79 */
 	/* write microcode via MSR 0x79 */
 	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 
 
@@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
 		prev_rev = rev;
 		prev_rev = rev;
 	}
 	}
 
 
-	c = &cpu_data(cpu);
-
 	uci->cpu_sig.rev = rev;
 	uci->cpu_sig.rev = rev;
 	c->microcode = rev;
 	c->microcode = rev;
 
 
@@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	unsigned int leftover = size;
 	unsigned int leftover = size;
 	unsigned int curr_mc_size = 0, new_mc_size = 0;
 	unsigned int curr_mc_size = 0, new_mc_size = 0;
 	unsigned int csig, cpf;
 	unsigned int csig, cpf;
+	enum ucode_state ret = UCODE_OK;
 
 
 	while (leftover) {
 	while (leftover) {
 		struct microcode_header_intel mc_header;
 		struct microcode_header_intel mc_header;
@@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			new_mc  = mc;
 			new_mc  = mc;
 			new_mc_size = mc_size;
 			new_mc_size = mc_size;
 			mc = NULL;	/* trigger new vmalloc */
 			mc = NULL;	/* trigger new vmalloc */
+			ret = UCODE_NEW;
 		}
 		}
 
 
 		ucode_ptr += mc_size;
 		ucode_ptr += mc_size;
@@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 		 cpu, new_rev, uci->cpu_sig.rev);
 		 cpu, new_rev, uci->cpu_sig.rev);
 
 
-	return UCODE_OK;
+	return ret;
 }
 }
 
 
 static int get_ucode_fw(void *to, const void *from, size_t n)
 static int get_ucode_fw(void *to, const void *from, size_t n)
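
The intel.c changes above add the same cheap guard in two places: read the
current microcode revision first and skip the expensive WBINVD + WRMSR
sequence entirely when the other hyperthread has already loaded an equal or
newer patch.  A sketch of that guard in isolation -- read_ucode_rev() and
load_ucode_patch() are hypothetical stand-ins, declared here only so the
fragment is self-contained:

/* hypothetical helpers standing in for the MSR accesses */
extern unsigned int read_ucode_rev(void);
extern void load_ucode_patch(void);

static int apply_patch_if_newer(unsigned int patch_rev)
{
	unsigned int cur = read_ucode_rev();

	if (cur >= patch_rev)		/* sibling already updated the core */
		return 0;		/* nothing to do, skip the costly path */

	load_ucode_patch();		/* the expensive part */
	return read_ucode_rev() >= patch_rev ? 1 : -1;
}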

+ 1 - 1
arch/x86/kernel/ioport.c

@@ -23,7 +23,7 @@
 /*
 /*
  * this changes the io permissions bitmap in the current task.
  * this changes the io permissions bitmap in the current task.
  */
  */
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
 {
 {
 	struct thread_struct *t = &current->thread;
 	struct thread_struct *t = &current->thread;
 	struct tss_struct *tss;
 	struct tss_struct *tss;

+ 9 - 1
arch/x86/kernel/kprobes/core.c

@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
 
 
 bool arch_within_kprobe_blacklist(unsigned long addr)
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 {
+	bool is_in_entry_trampoline_section = false;
+
+#ifdef CONFIG_X86_64
+	is_in_entry_trampoline_section =
+		(addr >= (unsigned long)__entry_trampoline_start &&
+		 addr < (unsigned long)__entry_trampoline_end);
+#endif
 	return  (addr >= (unsigned long)__kprobes_text_start &&
 	return  (addr >= (unsigned long)__kprobes_text_start &&
 		 addr < (unsigned long)__kprobes_text_end) ||
 		 addr < (unsigned long)__kprobes_text_end) ||
 		(addr >= (unsigned long)__entry_text_start &&
 		(addr >= (unsigned long)__entry_text_start &&
-		 addr < (unsigned long)__entry_text_end);
+		 addr < (unsigned long)__entry_text_end) ||
+		is_in_entry_trampoline_section;
 }
 }
 
 
 int __init arch_init_kprobes(void)
 int __init arch_init_kprobes(void)

+ 65 - 0
arch/x86/kernel/signal_compat.c

@@ -43,6 +43,13 @@ static inline void signal_compat_build_tests(void)
 	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
 	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
 #define CHECK_CSI_OFFSET(name)	  BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
 #define CHECK_CSI_OFFSET(name)	  BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
 
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_code)  != 8);
+
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_signo) != 0);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_errno) != 4);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_code)  != 8);
 	 /*
 	 /*
 	 * Ensure that the size of each si_field never changes.
 	 * Ensure that the size of each si_field never changes.
 	 * If it does, it is a sign that the
 	 * If it does, it is a sign that the
@@ -63,36 +70,94 @@ static inline void signal_compat_build_tests(void)
 	CHECK_CSI_SIZE  (_kill, 2*sizeof(int));
 	CHECK_CSI_SIZE  (_kill, 2*sizeof(int));
 	CHECK_SI_SIZE   (_kill, 2*sizeof(int));
 	CHECK_SI_SIZE   (_kill, 2*sizeof(int));
 
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0xC);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+
 	CHECK_CSI_OFFSET(_timer);
 	CHECK_CSI_OFFSET(_timer);
 	CHECK_CSI_SIZE  (_timer, 3*sizeof(int));
 	CHECK_CSI_SIZE  (_timer, 3*sizeof(int));
 	CHECK_SI_SIZE   (_timer, 6*sizeof(int));
 	CHECK_SI_SIZE   (_timer, 6*sizeof(int));
 
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_tid)     != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value)   != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_tid)     != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_overrun) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value)   != 0x14);
+
 	CHECK_CSI_OFFSET(_rt);
 	CHECK_CSI_OFFSET(_rt);
 	CHECK_CSI_SIZE  (_rt, 3*sizeof(int));
 	CHECK_CSI_SIZE  (_rt, 3*sizeof(int));
 	CHECK_SI_SIZE   (_rt, 4*sizeof(int));
 	CHECK_SI_SIZE   (_rt, 4*sizeof(int));
 
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid)   != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid)   != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid)   != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid)   != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
+
 	CHECK_CSI_OFFSET(_sigchld);
 	CHECK_CSI_OFFSET(_sigchld);
 	CHECK_CSI_SIZE  (_sigchld, 5*sizeof(int));
 	CHECK_CSI_SIZE  (_sigchld, 5*sizeof(int));
 	CHECK_SI_SIZE   (_sigchld, 8*sizeof(int));
 	CHECK_SI_SIZE   (_sigchld, 8*sizeof(int));
 
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid)    != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid)    != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x18);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_utime)  != 0x20);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_stime)  != 0x28);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid)    != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid)    != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_status) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_utime)  != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_stime)  != 0x1C);
+
 #ifdef CONFIG_X86_X32_ABI
 #ifdef CONFIG_X86_X32_ABI
 	CHECK_CSI_OFFSET(_sigchld_x32);
 	CHECK_CSI_OFFSET(_sigchld_x32);
 	CHECK_CSI_SIZE  (_sigchld_x32, 7*sizeof(int));
 	CHECK_CSI_SIZE  (_sigchld_x32, 7*sizeof(int));
 	/* no _sigchld_x32 in the generic siginfo_t */
 	/* no _sigchld_x32 in the generic siginfo_t */
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime)  != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime)  != 0x20);
 #endif
 #endif
 
 
 	CHECK_CSI_OFFSET(_sigfault);
 	CHECK_CSI_OFFSET(_sigfault);
 	CHECK_CSI_SIZE  (_sigfault, 4*sizeof(int));
 	CHECK_CSI_SIZE  (_sigfault, 4*sizeof(int));
 	CHECK_SI_SIZE   (_sigfault, 8*sizeof(int));
 	CHECK_SI_SIZE   (_sigfault, 8*sizeof(int));
 
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x20);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x28);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_lower) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_upper) != 0x18);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
+
 	CHECK_CSI_OFFSET(_sigpoll);
 	CHECK_CSI_OFFSET(_sigpoll);
 	CHECK_CSI_SIZE  (_sigpoll, 2*sizeof(int));
 	CHECK_CSI_SIZE  (_sigpoll, 2*sizeof(int));
 	CHECK_SI_SIZE   (_sigpoll, 4*sizeof(int));
 	CHECK_SI_SIZE   (_sigpoll, 4*sizeof(int));
 
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_band)   != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_fd)     != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_band) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_fd)   != 0x10);
+
 	CHECK_CSI_OFFSET(_sigsys);
 	CHECK_CSI_OFFSET(_sigsys);
 	CHECK_CSI_SIZE  (_sigsys, 3*sizeof(int));
 	CHECK_CSI_SIZE  (_sigsys, 3*sizeof(int));
 	CHECK_SI_SIZE   (_sigsys, 4*sizeof(int));
 	CHECK_SI_SIZE   (_sigsys, 4*sizeof(int));
 
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_syscall)   != 0x18);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_arch)      != 0x1C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_call_addr) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_syscall)   != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_arch)      != 0x14);
+
 	/* any new si_fields should be added here */
 	/* any new si_fields should be added here */
 }
 }
 
 

+ 2 - 1
arch/x86/kernel/vm86_32.c

@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
 	return;
 	return;
 
 
 check_vip:
 check_vip:
-	if (VEFLAGS & X86_EFLAGS_VIP) {
+	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
+	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
 		save_v86_state(regs, VM86_STI);
 		save_v86_state(regs, VM86_STI);
 		return;
 		return;
 	}
 	}
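
The vm86 fix above tightens the test from "VIP is set" to "both VIP and VIF
are set", and the idiom it relies on is that (flags & mask) == mask succeeds
only when every bit in the mask is present.  Shown in isolation, with the
standard x86 EFLAGS bit positions and an invented helper name:

#define VIF_FLAG	0x00080000UL	/* virtual interrupt flag, bit 19 */
#define VIP_FLAG	0x00100000UL	/* virtual interrupt pending, bit 20 */

static int vip_and_vif_both_set(unsigned long eflags)
{
	/* true only if *both* bits are set, not merely one of them */
	return (eflags & (VIP_FLAG | VIF_FLAG)) == (VIP_FLAG | VIF_FLAG);
}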

+ 2 - 0
arch/x86/kernel/vmlinux.lds.S

@@ -118,9 +118,11 @@ SECTIONS
 
 
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_X86_64
 		. = ALIGN(PAGE_SIZE);
 		. = ALIGN(PAGE_SIZE);
+		VMLINUX_SYMBOL(__entry_trampoline_start) = .;
 		_entry_trampoline = .;
 		_entry_trampoline = .;
 		*(.entry_trampoline)
 		*(.entry_trampoline)
 		. = ALIGN(PAGE_SIZE);
 		. = ALIGN(PAGE_SIZE);
+		VMLINUX_SYMBOL(__entry_trampoline_end) = .;
 		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 #endif
 #endif
 
 

+ 3 - 1
arch/x86/kvm/mmu.c

@@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	else
 	else
 		pte_access &= ~ACC_WRITE_MASK;
 		pte_access &= ~ACC_WRITE_MASK;
 
 
+	if (!kvm_is_mmio_pfn(pfn))
+		spte |= shadow_me_mask;
+
 	spte |= (u64)pfn << PAGE_SHIFT;
 	spte |= (u64)pfn << PAGE_SHIFT;
-	spte |= shadow_me_mask;
 
 
 	if (pte_access & ACC_WRITE_MASK) {
 	if (pte_access & ACC_WRITE_MASK) {
 
 

+ 8 - 1
arch/x86/kvm/vmx.c

@@ -1045,6 +1045,13 @@ static inline bool is_machine_check(u32 intr_info)
 		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
 		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
 }
 }
 
 
+/* Undocumented: icebp/int1 */
+static inline bool is_icebp(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+		== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
 static inline bool cpu_has_vmx_msr_bitmap(void)
 static inline bool cpu_has_vmx_msr_bitmap(void)
 {
 {
 	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
 	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
@@ -6179,7 +6186,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
 			vcpu->arch.dr6 &= ~15;
 			vcpu->arch.dr6 &= ~15;
 			vcpu->arch.dr6 |= dr6 | DR6_RTM;
 			vcpu->arch.dr6 |= dr6 | DR6_RTM;
-			if (!(dr6 & ~DR6_RESERVED)) /* icebp */
+			if (is_icebp(intr_info))
 				skip_emulated_instruction(vcpu);
 				skip_emulated_instruction(vcpu);
 
 
 			kvm_queue_exception(vcpu, DB_VECTOR);
 			kvm_queue_exception(vcpu, DB_VECTOR);

+ 3 - 3
arch/x86/mm/fault.c

@@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (!pmd_k)
 	if (!pmd_k)
 		return -1;
 		return -1;
 
 
-	if (pmd_huge(*pmd_k))
+	if (pmd_large(*pmd_k))
 		return 0;
 		return 0;
 
 
 	pte_k = pte_offset_kernel(pmd_k, address);
 	pte_k = pte_offset_kernel(pmd_k, address);
@@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
 	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
 		BUG();
 		BUG();
 
 
-	if (pud_huge(*pud))
+	if (pud_large(*pud))
 		return 0;
 		return 0;
 
 
 	pmd = pmd_offset(pud, address);
 	pmd = pmd_offset(pud, address);
@@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
 	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
 		BUG();
 		BUG();
 
 
-	if (pmd_huge(*pmd))
+	if (pmd_large(*pmd))
 		return 0;
 		return 0;
 
 
 	pte_ref = pte_offset_kernel(pmd_ref, address);
 	pte_ref = pte_offset_kernel(pmd_ref, address);

+ 28 - 32
arch/x86/mm/init_64.c

@@ -800,17 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 
 
 #define PAGE_INUSE 0xFD
 #define PAGE_INUSE 0xFD
 
 
-static void __meminit free_pagetable(struct page *page, int order,
-		struct vmem_altmap *altmap)
+static void __meminit free_pagetable(struct page *page, int order)
 {
 {
 	unsigned long magic;
 	unsigned long magic;
 	unsigned int nr_pages = 1 << order;
 	unsigned int nr_pages = 1 << order;
 
 
-	if (altmap) {
-		vmem_altmap_free(altmap, nr_pages);
-		return;
-	}
-
 	/* bootmem page has reserved flag */
 	/* bootmem page has reserved flag */
 	if (PageReserved(page)) {
 	if (PageReserved(page)) {
 		__ClearPageReserved(page);
 		__ClearPageReserved(page);
@@ -826,8 +820,16 @@ static void __meminit free_pagetable(struct page *page, int order,
 		free_pages((unsigned long)page_address(page), order);
 		free_pages((unsigned long)page_address(page), order);
 }
 }
 
 
-static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
+static void __meminit free_hugepage_table(struct page *page,
 		struct vmem_altmap *altmap)
 		struct vmem_altmap *altmap)
+{
+	if (altmap)
+		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
+	else
+		free_pagetable(page, get_order(PMD_SIZE));
+}
+
+static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 {
 {
 	pte_t *pte;
 	pte_t *pte;
 	int i;
 	int i;
@@ -839,14 +841,13 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
 	}
 	}
 
 
 	/* free a pte talbe */
 	/* free a pte talbe */
-	free_pagetable(pmd_page(*pmd), 0, altmap);
+	free_pagetable(pmd_page(*pmd), 0);
 	spin_lock(&init_mm.page_table_lock);
 	spin_lock(&init_mm.page_table_lock);
 	pmd_clear(pmd);
 	pmd_clear(pmd);
 	spin_unlock(&init_mm.page_table_lock);
 	spin_unlock(&init_mm.page_table_lock);
 }
 }
 
 
-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
-		struct vmem_altmap *altmap)
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 {
 {
 	pmd_t *pmd;
 	pmd_t *pmd;
 	int i;
 	int i;
@@ -858,14 +859,13 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
 	}
 	}
 
 
 	/* free a pmd talbe */
 	/* free a pmd talbe */
-	free_pagetable(pud_page(*pud), 0, altmap);
+	free_pagetable(pud_page(*pud), 0);
 	spin_lock(&init_mm.page_table_lock);
 	spin_lock(&init_mm.page_table_lock);
 	pud_clear(pud);
 	pud_clear(pud);
 	spin_unlock(&init_mm.page_table_lock);
 	spin_unlock(&init_mm.page_table_lock);
 }
 }
 
 
-static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
-		struct vmem_altmap *altmap)
+static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
 {
 {
 	pud_t *pud;
 	pud_t *pud;
 	int i;
 	int i;
@@ -877,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
 	}
 	}
 
 
 	/* free a pud talbe */
 	/* free a pud talbe */
-	free_pagetable(p4d_page(*p4d), 0, altmap);
+	free_pagetable(p4d_page(*p4d), 0);
 	spin_lock(&init_mm.page_table_lock);
 	spin_lock(&init_mm.page_table_lock);
 	p4d_clear(p4d);
 	p4d_clear(p4d);
 	spin_unlock(&init_mm.page_table_lock);
 	spin_unlock(&init_mm.page_table_lock);
@@ -885,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
 
 
 static void __meminit
 static void __meminit
 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
-		 struct vmem_altmap *altmap, bool direct)
+		 bool direct)
 {
 {
 	unsigned long next, pages = 0;
 	unsigned long next, pages = 0;
 	pte_t *pte;
 	pte_t *pte;
@@ -916,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 			 * freed when offlining, or simplely not in use.
 			 * freed when offlining, or simplely not in use.
 			 */
 			 */
 			if (!direct)
 			if (!direct)
-				free_pagetable(pte_page(*pte), 0, altmap);
+				free_pagetable(pte_page(*pte), 0);
 
 
 			spin_lock(&init_mm.page_table_lock);
 			spin_lock(&init_mm.page_table_lock);
 			pte_clear(&init_mm, addr, pte);
 			pte_clear(&init_mm, addr, pte);
@@ -939,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 
 
 			page_addr = page_address(pte_page(*pte));
 			page_addr = page_address(pte_page(*pte));
 			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
 			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
-				free_pagetable(pte_page(*pte), 0, altmap);
+				free_pagetable(pte_page(*pte), 0);
 
 
 				spin_lock(&init_mm.page_table_lock);
 				spin_lock(&init_mm.page_table_lock);
 				pte_clear(&init_mm, addr, pte);
 				pte_clear(&init_mm, addr, pte);
@@ -974,9 +974,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 			if (IS_ALIGNED(addr, PMD_SIZE) &&
 			if (IS_ALIGNED(addr, PMD_SIZE) &&
 			    IS_ALIGNED(next, PMD_SIZE)) {
 			    IS_ALIGNED(next, PMD_SIZE)) {
 				if (!direct)
 				if (!direct)
-					free_pagetable(pmd_page(*pmd),
-						       get_order(PMD_SIZE),
-						       altmap);
+					free_hugepage_table(pmd_page(*pmd),
+							    altmap);
 
 
 				spin_lock(&init_mm.page_table_lock);
 				spin_lock(&init_mm.page_table_lock);
 				pmd_clear(pmd);
 				pmd_clear(pmd);
@@ -989,9 +988,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 				page_addr = page_address(pmd_page(*pmd));
 				page_addr = page_address(pmd_page(*pmd));
 				if (!memchr_inv(page_addr, PAGE_INUSE,
 				if (!memchr_inv(page_addr, PAGE_INUSE,
 						PMD_SIZE)) {
 						PMD_SIZE)) {
-					free_pagetable(pmd_page(*pmd),
-						       get_order(PMD_SIZE),
-						       altmap);
+					free_hugepage_table(pmd_page(*pmd),
+							    altmap);
 
 
 					spin_lock(&init_mm.page_table_lock);
 					spin_lock(&init_mm.page_table_lock);
 					pmd_clear(pmd);
 					pmd_clear(pmd);
@@ -1003,8 +1001,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 		}
 		}
 
 
 		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
 		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
-		remove_pte_table(pte_base, addr, next, altmap, direct);
-		free_pte_table(pte_base, pmd, altmap);
+		remove_pte_table(pte_base, addr, next, direct);
+		free_pte_table(pte_base, pmd);
 	}
 	}
 
 
 	/* Call free_pmd_table() in remove_pud_table(). */
 	/* Call free_pmd_table() in remove_pud_table(). */
@@ -1033,8 +1031,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 			    IS_ALIGNED(next, PUD_SIZE)) {
 			    IS_ALIGNED(next, PUD_SIZE)) {
 				if (!direct)
 				if (!direct)
 					free_pagetable(pud_page(*pud),
 					free_pagetable(pud_page(*pud),
-						       get_order(PUD_SIZE),
-						       altmap);
+						       get_order(PUD_SIZE));
 
 
 				spin_lock(&init_mm.page_table_lock);
 				spin_lock(&init_mm.page_table_lock);
 				pud_clear(pud);
 				pud_clear(pud);
@@ -1048,8 +1045,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 				if (!memchr_inv(page_addr, PAGE_INUSE,
 				if (!memchr_inv(page_addr, PAGE_INUSE,
 						PUD_SIZE)) {
 						PUD_SIZE)) {
 					free_pagetable(pud_page(*pud),
 					free_pagetable(pud_page(*pud),
-						       get_order(PUD_SIZE),
-						       altmap);
+						       get_order(PUD_SIZE));
 
 
 					spin_lock(&init_mm.page_table_lock);
 					spin_lock(&init_mm.page_table_lock);
 					pud_clear(pud);
 					pud_clear(pud);
@@ -1062,7 +1058,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 
 
 		pmd_base = pmd_offset(pud, 0);
 		pmd_base = pmd_offset(pud, 0);
 		remove_pmd_table(pmd_base, addr, next, direct, altmap);
 		remove_pmd_table(pmd_base, addr, next, direct, altmap);
-		free_pmd_table(pmd_base, pud, altmap);
+		free_pmd_table(pmd_base, pud);
 	}
 	}
 
 
 	if (direct)
 	if (direct)
@@ -1094,7 +1090,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
 		 * to adapt for boot-time switching between 4 and 5 level page tables.
 		 * to adapt for boot-time switching between 4 and 5 level page tables.
 		 */
 		 */
 		if (CONFIG_PGTABLE_LEVELS == 5)
 		if (CONFIG_PGTABLE_LEVELS == 5)
-			free_pud_table(pud_base, p4d, altmap);
+			free_pud_table(pud_base, p4d);
 	}
 	}
 
 
 	if (direct)
 	if (direct)

+ 48 - 0
arch/x86/mm/pgtable.c

@@ -702,4 +702,52 @@ int pmd_clear_huge(pmd_t *pmd)
 
 
 	return 0;
 	return 0;
 }
 }
+
+/**
+ * pud_free_pmd_page - Clear pud entry and free pmd page.
+ * @pud: Pointer to a PUD.
+ *
+ * Context: The pud range has been unmaped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pud_free_pmd_page(pud_t *pud)
+{
+	pmd_t *pmd;
+	int i;
+
+	if (pud_none(*pud))
+		return 1;
+
+	pmd = (pmd_t *)pud_page_vaddr(*pud);
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		if (!pmd_free_pte_page(&pmd[i]))
+			return 0;
+
+	pud_clear(pud);
+	free_page((unsigned long)pmd);
+
+	return 1;
+}
+
+/**
+ * pmd_free_pte_page - Clear pmd entry and free pte page.
+ * @pmd: Pointer to a PMD.
+ *
+ * Context: The pmd range has been unmaped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pmd_free_pte_page(pmd_t *pmd)
+{
+	pte_t *pte;
+
+	if (pmd_none(*pmd))
+		return 1;
+
+	pte = (pte_t *)pmd_page_vaddr(*pmd);
+	pmd_clear(pmd);
+	free_page((unsigned long)pte);
+
+	return 1;
+}
 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

+ 1 - 1
arch/x86/mm/pti.c

@@ -332,7 +332,7 @@ static void __init pti_clone_user_shared(void)
 }
 }
 
 
 /*
 /*
- * Clone the ESPFIX P4D into the user space visinble page table
+ * Clone the ESPFIX P4D into the user space visible page table
  */
  */
 static void __init pti_setup_espfix64(void)
 static void __init pti_setup_espfix64(void)
 {
 {

+ 2 - 1
arch/x86/net/bpf_jit_comp.c

@@ -1223,7 +1223,7 @@ skip_init_addrs:
 	 * may converge on the last pass. In such case do one more
 	 * may converge on the last pass. In such case do one more
 	 * pass to emit the final image
 	 * pass to emit the final image
 	 */
 	 */
-	for (pass = 0; pass < 10 || image; pass++) {
+	for (pass = 0; pass < 20 || image; pass++) {
 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
 		if (proglen <= 0) {
 		if (proglen <= 0) {
 			image = NULL;
 			image = NULL;
@@ -1250,6 +1250,7 @@ skip_init_addrs:
 			}
 			}
 		}
 		}
 		oldproglen = proglen;
 		oldproglen = proglen;
+		cond_resched();
 	}
 	}
 
 
 	if (bpf_jit_enable > 1)
 	if (bpf_jit_enable > 1)

+ 2 - 2
drivers/acpi/acpi_watchdog.c

@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void)
 		res.start = gas->address;
 		res.start = gas->address;
 		if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 		if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
 			res.flags = IORESOURCE_MEM;
 			res.flags = IORESOURCE_MEM;
-			res.end = res.start + ALIGN(gas->access_width, 4);
+			res.end = res.start + ALIGN(gas->access_width, 4) - 1;
 		} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
 		} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
 			res.flags = IORESOURCE_IO;
 			res.flags = IORESOURCE_IO;
-			res.end = res.start + gas->access_width;
+			res.end = res.start + gas->access_width - 1;
 		} else {
 		} else {
 			pr_warn("Unsupported address space: %u\n",
 			pr_warn("Unsupported address space: %u\n",
 				gas->space_id);
 				gas->space_id);
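
The watchdog fix above is an off-by-one in resource bookkeeping: a struct
resource describes an inclusive [start, end] range, so a region of len bytes
starting at start must end at start + len - 1.  A trivial standalone
illustration (the addresses are made up):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0xfed40000UL, len = 4;
	unsigned long end = start + len - 1;	/* last byte that is covered */

	printf("region [%#lx, %#lx] spans %lu bytes\n",
	       start, end, end - start + 1);
	return 0;
}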

+ 3 - 45
drivers/acpi/battery.c

@@ -70,7 +70,6 @@ static async_cookie_t async_cookie;
 static bool battery_driver_registered;
 static bool battery_driver_registered;
 static int battery_bix_broken_package;
 static int battery_bix_broken_package;
 static int battery_notification_delay_ms;
 static int battery_notification_delay_ms;
-static int battery_full_discharging;
 static unsigned int cache_time = 1000;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -215,12 +214,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
 		return -ENODEV;
 		return -ENODEV;
 	switch (psp) {
 	switch (psp) {
 	case POWER_SUPPLY_PROP_STATUS:
 	case POWER_SUPPLY_PROP_STATUS:
-		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
-			if (battery_full_discharging && battery->rate_now == 0)
-				val->intval = POWER_SUPPLY_STATUS_FULL;
-			else
-				val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
-		} else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
 			val->intval = POWER_SUPPLY_STATUS_CHARGING;
 			val->intval = POWER_SUPPLY_STATUS_CHARGING;
 		else if (acpi_battery_is_charged(battery))
 		else if (acpi_battery_is_charged(battery))
 			val->intval = POWER_SUPPLY_STATUS_FULL;
 			val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1170,12 +1166,6 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
 	return 0;
 	return 0;
 }
 }
 
 
-static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
-{
-	battery_full_discharging = 1;
-	return 0;
-}
-
 static const struct dmi_system_id bat_dmi_table[] __initconst = {
 static const struct dmi_system_id bat_dmi_table[] __initconst = {
 	{
 	{
 		.callback = battery_bix_broken_package_quirk,
 		.callback = battery_bix_broken_package_quirk,
@@ -1193,38 +1183,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
 		},
 		},
 	},
 	},
-	{
-		.callback = battery_full_discharging_quirk,
-		.ident = "ASUS GL502VSK",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
-		},
-	},
-	{
-		.callback = battery_full_discharging_quirk,
-		.ident = "ASUS UX305LA",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
-		},
-	},
-	{
-		.callback = battery_full_discharging_quirk,
-		.ident = "ASUS UX360UA",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"),
-		},
-	},
-	{
-		.callback = battery_full_discharging_quirk,
-		.ident = "ASUS UX410UAK",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"),
-		},
-	},
 	{},
 	{},
 };
 };
 
 

+ 7 - 3
drivers/acpi/nfit/core.c

@@ -2675,10 +2675,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
 	else
 	else
 		ndr_desc->numa_node = NUMA_NO_NODE;
 		ndr_desc->numa_node = NUMA_NO_NODE;
 
 
-	if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
+	/*
+	 * Persistence domain bits are hierarchical, if
+	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
+	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
+	 */
+	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
 		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
 		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
-
-	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
+	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
 		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
 		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
 
 
 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {

+ 6 - 4
drivers/acpi/numa.c

@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
  */
  */
 int acpi_map_pxm_to_online_node(int pxm)
 int acpi_map_pxm_to_online_node(int pxm)
 {
 {
-	int node, n, dist, min_dist;
+	int node, min_node;
 
 
 	node = acpi_map_pxm_to_node(pxm);
 	node = acpi_map_pxm_to_node(pxm);
 
 
 	if (node == NUMA_NO_NODE)
 	if (node == NUMA_NO_NODE)
 		node = 0;
 		node = 0;
 
 
+	min_node = node;
 	if (!node_online(node)) {
 	if (!node_online(node)) {
-		min_dist = INT_MAX;
+		int min_dist = INT_MAX, dist, n;
+
 		for_each_online_node(n) {
 		for_each_online_node(n) {
 			dist = node_distance(node, n);
 			dist = node_distance(node, n);
 			if (dist < min_dist) {
 			if (dist < min_dist) {
 				min_dist = dist;
 				min_dist = dist;
-				node = n;
+				min_node = n;
 			}
 			}
 		}
 		}
 	}
 	}
 
 
-	return node;
+	return min_node;
 }
 }
 EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
 EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
 
 

+ 3 - 1
drivers/ata/ahci.c

@@ -550,7 +550,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  .driver_data = board_ahci_yes_fbs },
 	  .driver_data = board_ahci_yes_fbs },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
 	  .driver_data = board_ahci_yes_fbs },
 	  .driver_data = board_ahci_yes_fbs },
-	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
+	  .driver_data = board_ahci_yes_fbs },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
 	  .driver_data = board_ahci_yes_fbs },
 	  .driver_data = board_ahci_yes_fbs },
 
 
 	/* Promise */
 	/* Promise */

+ 10 - 0
drivers/ata/libahci.c

@@ -665,6 +665,16 @@ int ahci_stop_engine(struct ata_port *ap)
 	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
 	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
 		return 0;
 		return 0;
 
 
+	/*
+	 * Don't try to issue commands but return with ENODEV if the
+	 * AHCI controller not available anymore (e.g. due to PCIe hot
+	 * unplugging). Otherwise a 500ms delay for each port is added.
+	 */
+	if (tmp == 0xffffffff) {
+		dev_err(ap->host->dev, "AHCI controller unavailable!\n");
+		return -ENODEV;
+	}
+
 	/* setting HBA to idle */
 	/* setting HBA to idle */
 	tmp &= ~PORT_CMD_START;
 	tmp &= ~PORT_CMD_START;
 	writel(tmp, port_mmio + PORT_CMD);
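
The libahci change above adds a surprise-removal check: reads from a PCI
device that has been hot-unplugged generally return all ones, so a port
register value of 0xffffffff is taken to mean "controller is gone" and the
function bails out instead of burning the 500 ms per-port delay mentioned in
the comment.  The test itself, as a tiny standalone helper (name invented):

static int controller_vanished(unsigned int port_cmd)
{
	return port_cmd == 0xffffffffu;
}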
 	writel(tmp, port_mmio + PORT_CMD);

+ 1 - 1
drivers/ata/libahci_platform.c

@@ -340,7 +340,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
  * 2) regulator for controlling the targets power (optional)
  * 2) regulator for controlling the targets power (optional)
  * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
  * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
  *    or for non devicetree enabled platforms a single clock
  *    or for non devicetree enabled platforms a single clock
- *	4) phys (optional)
+ * 4) phys (optional)
  *
  *
  * RETURNS:
  * RETURNS:
  * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
  * The allocated ahci_host_priv on success, otherwise an ERR_PTR value

+ 23 - 3
drivers/ata/libata-core.c

@@ -4530,6 +4530,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
 
 
+	/* Crucial BX100 SSD 500GB has broken LPM support */
+	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
+
+	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
+	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM |
+						ATA_HORKAGE_NOLPM, },
+	/* 512GB MX100 with newer firmware has only LPM issues */
+	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
+						ATA_HORKAGE_NOLPM, },
+
+	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
+	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM |
+						ATA_HORKAGE_NOLPM, },
+	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM |
+						ATA_HORKAGE_NOLPM, },
+
 	/* devices that don't properly handle queued TRIM commands */
 	/* devices that don't properly handle queued TRIM commands */
 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4541,7 +4560,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
-	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -5401,8 +5422,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	 * We guarantee to LLDs that they will have at least one
 	 * We guarantee to LLDs that they will have at least one
 	 * non-zero sg if the command is a data command.
 	 * non-zero sg if the command is a data command.
 	 */
 	 */
-	if (WARN_ON_ONCE(ata_is_data(prot) &&
-			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
+	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
 		goto sys_err;
 		goto sys_err;
 
 
 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
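
The new ATA_HORKAGE_NOLPM blacklist entries above use patterns such as
"Samsung SSD 840*", which are matched as simple glob expressions against the
model and firmware strings the drive reports.  A user-space sketch of that
kind of match, using POSIX fnmatch() purely for illustration (the kernel has
its own glob helper; the model string below is made up):

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	const char *pattern = "Samsung SSD 840*";
	const char *model   = "Samsung SSD 840 EVO 250GB";

	printf("%s\n", fnmatch(pattern, model, 0) == 0 ?
	       "matches the blacklist entry" : "no match");
	return 0;
}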

+ 2 - 1
drivers/ata/libata-eh.c

@@ -815,7 +815,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
 
 
 	if (ap->pflags & ATA_PFLAG_LOADING)
 	if (ap->pflags & ATA_PFLAG_LOADING)
 		ap->pflags &= ~ATA_PFLAG_LOADING;
 		ap->pflags &= ~ATA_PFLAG_LOADING;
-	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
+	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
+		!(ap->flags & ATA_FLAG_SAS_HOST))
 		schedule_delayed_work(&ap->hotplug_task, 0);
 		schedule_delayed_work(&ap->hotplug_task, 0);
 
 
 	if (ap->pflags & ATA_PFLAG_RECOVERED)
 	if (ap->pflags & ATA_PFLAG_RECOVERED)

+ 10 - 2
drivers/ata/libata-scsi.c

@@ -3316,6 +3316,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 		goto invalid_fld;
 		goto invalid_fld;
 	}
 	}
 
 
+	/* We may not issue NCQ commands to devices not supporting NCQ */
+	if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) {
+		fp = 1;
+		goto invalid_fld;
+	}
+
 	/* sanity check for pio multi commands */
 	/* sanity check for pio multi commands */
 	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
 	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
 		fp = 1;
 		fp = 1;
@@ -4282,7 +4288,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
 #ifdef ATA_DEBUG
 #ifdef ATA_DEBUG
 	struct scsi_device *scsidev = cmd->device;
 	struct scsi_device *scsidev = cmd->device;
 
 
-	DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
+	DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
 		ap->print_id,
 		ap->print_id,
 		scsidev->channel, scsidev->id, scsidev->lun,
 		scsidev->channel, scsidev->id, scsidev->lun,
 		cmd->cmnd);
 		cmd->cmnd);
@@ -4309,7 +4315,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
 		if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
 		if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
 			/* relay SCSI command to ATAPI device */
 			/* relay SCSI command to ATAPI device */
 			int len = COMMAND_SIZE(scsi_op);
 			int len = COMMAND_SIZE(scsi_op);
-			if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
+			if (unlikely(len > scmd->cmd_len ||
+				     len > dev->cdb_len ||
+				     scmd->cmd_len > ATAPI_CDB_LEN))
 				goto bad_cdb_len;
 				goto bad_cdb_len;
 
 
 			xlat_func = atapi_xlat;
 			xlat_func = atapi_xlat;

+ 39 - 23
drivers/ata/sata_rcar.c

@@ -146,6 +146,7 @@
 enum sata_rcar_type {
 enum sata_rcar_type {
 	RCAR_GEN1_SATA,
 	RCAR_GEN1_SATA,
 	RCAR_GEN2_SATA,
 	RCAR_GEN2_SATA,
+	RCAR_GEN3_SATA,
 	RCAR_R8A7790_ES1_SATA,
 	RCAR_R8A7790_ES1_SATA,
 };
 };
 
 
@@ -784,26 +785,11 @@ static void sata_rcar_setup_port(struct ata_host *host)
 	ioaddr->command_addr	= ioaddr->cmd_addr + (ATA_REG_CMD << 2);
 	ioaddr->command_addr	= ioaddr->cmd_addr + (ATA_REG_CMD << 2);
 }
 }
 
 
-static void sata_rcar_init_controller(struct ata_host *host)
+static void sata_rcar_init_module(struct sata_rcar_priv *priv)
 {
 {
-	struct sata_rcar_priv *priv = host->private_data;
 	void __iomem *base = priv->base;
 	void __iomem *base = priv->base;
 	u32 val;
 	u32 val;
 
 
-	/* reset and setup phy */
-	switch (priv->type) {
-	case RCAR_GEN1_SATA:
-		sata_rcar_gen1_phy_init(priv);
-		break;
-	case RCAR_GEN2_SATA:
-	case RCAR_R8A7790_ES1_SATA:
-		sata_rcar_gen2_phy_init(priv);
-		break;
-	default:
-		dev_warn(host->dev, "SATA phy is not initialized\n");
-		break;
-	}
-
 	/* SATA-IP reset state */
 	/* SATA-IP reset state */
 	val = ioread32(base + ATAPI_CONTROL1_REG);
 	val = ioread32(base + ATAPI_CONTROL1_REG);
 	val |= ATAPI_CONTROL1_RESET;
 	val |= ATAPI_CONTROL1_RESET;
@@ -824,10 +810,33 @@ static void sata_rcar_init_controller(struct ata_host *host)
 	/* ack and mask */
 	/* ack and mask */
 	iowrite32(0, base + SATAINTSTAT_REG);
 	iowrite32(0, base + SATAINTSTAT_REG);
 	iowrite32(0x7ff, base + SATAINTMASK_REG);
 	iowrite32(0x7ff, base + SATAINTMASK_REG);
+
 	/* enable interrupts */
 	/* enable interrupts */
 	iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
 	iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
 }
 }
 
 
+static void sata_rcar_init_controller(struct ata_host *host)
+{
+	struct sata_rcar_priv *priv = host->private_data;
+
+	/* reset and setup phy */
+	switch (priv->type) {
+	case RCAR_GEN1_SATA:
+		sata_rcar_gen1_phy_init(priv);
+		break;
+	case RCAR_GEN2_SATA:
+	case RCAR_GEN3_SATA:
+	case RCAR_R8A7790_ES1_SATA:
+		sata_rcar_gen2_phy_init(priv);
+		break;
+	default:
+		dev_warn(host->dev, "SATA phy is not initialized\n");
+		break;
+	}
+
+	sata_rcar_init_module(priv);
+}
+
 static const struct of_device_id sata_rcar_match[] = {
 static const struct of_device_id sata_rcar_match[] = {
 	{
 	{
 		/* Deprecated by "renesas,sata-r8a7779" */
 		/* Deprecated by "renesas,sata-r8a7779" */
@@ -856,7 +865,7 @@ static const struct of_device_id sata_rcar_match[] = {
 	},
 	},
 	{
 	{
 		.compatible = "renesas,sata-r8a7795",
 		.compatible = "renesas,sata-r8a7795",
-		.data = (void *)RCAR_GEN2_SATA
+		.data = (void *)RCAR_GEN3_SATA
 	},
 	},
 	{
 	{
 		.compatible = "renesas,rcar-gen2-sata",
 		.compatible = "renesas,rcar-gen2-sata",
@@ -864,7 +873,7 @@ static const struct of_device_id sata_rcar_match[] = {
 	},
 	},
 	{
 	{
 		.compatible = "renesas,rcar-gen3-sata",
 		.compatible = "renesas,rcar-gen3-sata",
-		.data = (void *)RCAR_GEN2_SATA
+		.data = (void *)RCAR_GEN3_SATA
 	},
 	},
 	{ },
 	{ },
 };
 };
@@ -982,11 +991,18 @@ static int sata_rcar_resume(struct device *dev)
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
-	/* ack and mask */
-	iowrite32(0, base + SATAINTSTAT_REG);
-	iowrite32(0x7ff, base + SATAINTMASK_REG);
-	/* enable interrupts */
-	iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
+	if (priv->type == RCAR_GEN3_SATA) {
+		sata_rcar_gen2_phy_init(priv);
+		sata_rcar_init_module(priv);
+	} else {
+		/* ack and mask */
+		iowrite32(0, base + SATAINTSTAT_REG);
+		iowrite32(0x7ff, base + SATAINTMASK_REG);
+
+		/* enable interrupts */
+		iowrite32(ATAPI_INT_ENABLE_SATAINT,
+			  base + ATAPI_INT_ENABLE_REG);
+	}
 
 
 	ata_host_resume(host);
 	ata_host_resume(host);
 
 

+ 3 - 3
drivers/auxdisplay/img-ascii-lcd.c

@@ -97,7 +97,7 @@ static struct img_ascii_lcd_config boston_config = {
 static void malta_update(struct img_ascii_lcd_ctx *ctx)
 static void malta_update(struct img_ascii_lcd_ctx *ctx)
 {
 {
 	unsigned int i;
 	unsigned int i;
-	int err;
+	int err = 0;
 
 
 	for (i = 0; i < ctx->cfg->num_chars; i++) {
 	for (i = 0; i < ctx->cfg->num_chars; i++) {
 		err = regmap_write(ctx->regmap,
 		err = regmap_write(ctx->regmap,
@@ -180,7 +180,7 @@ static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
 static void sead3_update(struct img_ascii_lcd_ctx *ctx)
 static void sead3_update(struct img_ascii_lcd_ctx *ctx)
 {
 {
 	unsigned int i;
 	unsigned int i;
-	int err;
+	int err = 0;
 
 
 	for (i = 0; i < ctx->cfg->num_chars; i++) {
 	for (i = 0; i < ctx->cfg->num_chars; i++) {
 		err = sead3_wait_lcd_idle(ctx);
 		err = sead3_wait_lcd_idle(ctx);
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches);
 
 
 /**
 /**
  * img_ascii_lcd_scroll() - scroll the display by a character
  * img_ascii_lcd_scroll() - scroll the display by a character
- * @arg: really a pointer to the private data structure
+ * @t: really a pointer to the private data structure
  *
  *
  * Scroll the current message along the LCD by one character, rearming the
  * Scroll the current message along the LCD by one character, rearming the
  * timer if required.
  * timer if required.

+ 3 - 3
drivers/auxdisplay/panel.c

@@ -1372,7 +1372,7 @@ static void panel_process_inputs(void)
 				break;
 				break;
 			input->rise_timer = 0;
 			input->rise_timer = 0;
 			input->state = INPUT_ST_RISING;
 			input->state = INPUT_ST_RISING;
-			/* no break here, fall through */
+			/* fall through */
 		case INPUT_ST_RISING:
 		case INPUT_ST_RISING:
 			if ((phys_curr & input->mask) != input->value) {
 			if ((phys_curr & input->mask) != input->value) {
 				input->state = INPUT_ST_LOW;
 				input->state = INPUT_ST_LOW;
@@ -1385,11 +1385,11 @@ static void panel_process_inputs(void)
 			}
 			}
 			input->high_timer = 0;
 			input->high_timer = 0;
 			input->state = INPUT_ST_HIGH;
 			input->state = INPUT_ST_HIGH;
-			/* no break here, fall through */
+			/* fall through */
 		case INPUT_ST_HIGH:
 		case INPUT_ST_HIGH:
 			if (input_state_high(input))
 			if (input_state_high(input))
 				break;
 				break;
-			/* no break here, fall through */
+			/* fall through */
 		case INPUT_ST_FALLING:
 		case INPUT_ST_FALLING:
 			input_state_falling(input);
 			input_state_falling(input);
 		}
 		}

+ 1 - 1
drivers/block/loop.c

@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
 	struct iov_iter i;
 	struct iov_iter i;
 	ssize_t bw;
 	ssize_t bw;
 
 
-	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
+	iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
 
 
 	file_start_write(file);
 	file_start_write(file);
 	bw = vfs_iter_write(file, &i, ppos, 0);
 	bw = vfs_iter_write(file, &i, ppos, 0);

+ 8 - 9
drivers/block/xen-blkfront.c

@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);
 
 
 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
 static void blkfront_gather_backend_features(struct blkfront_info *info);
 static void blkfront_gather_backend_features(struct blkfront_info *info);
+static int negotiate_mq(struct blkfront_info *info);
 
 
 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {
 {
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	unsigned int i, max_page_order;
 	unsigned int i, max_page_order;
 	unsigned int ring_page_order;
 	unsigned int ring_page_order;
 
 
+	if (!info)
+		return -ENODEV;
+
 	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
 	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
 					      "max-ring-page-order", 0);
 					      "max-ring-page-order", 0);
 	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
 	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
 	info->nr_ring_pages = 1 << ring_page_order;
 	info->nr_ring_pages = 1 << ring_page_order;
 
 
+	err = negotiate_mq(info);
+	if (err)
+		goto destroy_blkring;
+
 	for (i = 0; i < info->nr_rings; i++) {
 	for (i = 0; i < info->nr_rings; i++) {
 		struct blkfront_ring_info *rinfo = &info->rinfo[i];
 		struct blkfront_ring_info *rinfo = &info->rinfo[i];
 
 
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,
 	}
 	}
 
 
 	info->xbdev = dev;
 	info->xbdev = dev;
-	err = negotiate_mq(info);
-	if (err) {
-		kfree(info);
-		return err;
-	}
 
 
 	mutex_init(&info->mutex);
 	mutex_init(&info->mutex);
 	info->vdevice = vdevice;
 	info->vdevice = vdevice;
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)
 
 
 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
 
-	err = negotiate_mq(info);
-	if (err)
-		return err;
-
 	err = talk_to_blkback(dev, info);
 	err = talk_to_blkback(dev, info);
 	if (!err)
 	if (!err)
 		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
 		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

+ 4 - 4
drivers/bluetooth/btusb.c

@@ -231,7 +231,6 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
-	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -264,6 +263,7 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
 
 	/* QCA ROME chipset */
 	/* QCA ROME chipset */
+	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
@@ -390,10 +390,10 @@ static const struct usb_device_id blacklist_table[] = {
  */
  */
 static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
 static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
 	{
 	{
-		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+		/* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */
 		.matches = {
 		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
 		},
 		},
 	},
 	},
 	{}
 	{}

+ 9 - 4
drivers/bluetooth/hci_bcm.c

@@ -244,7 +244,9 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
 
 
 	bt_dev_dbg(bdev, "Host wake IRQ");
 	bt_dev_dbg(bdev, "Host wake IRQ");
 
 
-	pm_request_resume(bdev->dev);
+	pm_runtime_get(bdev->dev);
+	pm_runtime_mark_last_busy(bdev->dev);
+	pm_runtime_put_autosuspend(bdev->dev);
 
 
 	return IRQ_HANDLED;
 	return IRQ_HANDLED;
 }
 }
@@ -301,7 +303,7 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
 	.usb_auto_sleep = 0,
 	.usb_auto_sleep = 0,
 	.usb_resume_timeout = 0,
 	.usb_resume_timeout = 0,
 	.break_to_host = 0,
 	.break_to_host = 0,
-	.pulsed_host_wake = 0,
+	.pulsed_host_wake = 1,
 };
 };
 
 
 static int bcm_setup_sleep(struct hci_uart *hu)
 static int bcm_setup_sleep(struct hci_uart *hu)
@@ -586,8 +588,11 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
 	} else if (!bcm->rx_skb) {
 	} else if (!bcm->rx_skb) {
 		/* Delay auto-suspend when receiving completed packet */
 		/* Delay auto-suspend when receiving completed packet */
 		mutex_lock(&bcm_device_lock);
 		mutex_lock(&bcm_device_lock);
-		if (bcm->dev && bcm_device_exists(bcm->dev))
-			pm_request_resume(bcm->dev->dev);
+		if (bcm->dev && bcm_device_exists(bcm->dev)) {
+			pm_runtime_get(bcm->dev->dev);
+			pm_runtime_mark_last_busy(bcm->dev->dev);
+			pm_runtime_put_autosuspend(bcm->dev->dev);
+		}
 		mutex_unlock(&bcm_device_lock);
 		mutex_unlock(&bcm_device_lock);
 	}
 	}
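
The hci_bcm change above swaps a bare pm_request_resume() for the
get / mark_last_busy / put_autosuspend triple.  The difference is that the
triple also refreshes the autosuspend timestamp, so the device stays resumed
for a full autosuspend delay after the wake event instead of being suspended
again almost immediately.  The pattern isolated into a helper (the helper
name is illustrative, not part of the driver):

#include <linux/pm_runtime.h>

static void keep_device_awake(struct device *dev)
{
	pm_runtime_get(dev);			/* async usage-count bump + resume */
	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the count, suspend later */
}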
 
 

+ 8 - 4
drivers/clk/bcm/clk-bcm2835.c

@@ -449,17 +449,17 @@ struct bcm2835_pll_ana_bits {
 static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {
 static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {
 	.mask0 = 0,
 	.mask0 = 0,
 	.set0 = 0,
 	.set0 = 0,
-	.mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK),
+	.mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK,
 	.set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT),
 	.set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT),
-	.mask3 = (u32)~A2W_PLL_KA_MASK,
+	.mask3 = A2W_PLL_KA_MASK,
 	.set3 = (2 << A2W_PLL_KA_SHIFT),
 	.set3 = (2 << A2W_PLL_KA_SHIFT),
 	.fb_prediv_mask = BIT(14),
 	.fb_prediv_mask = BIT(14),
 };
 };
 
 
 static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = {
 static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = {
-	.mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK),
+	.mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK,
 	.set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT),
 	.set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT),
-	.mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK),
+	.mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK,
 	.set1 = (6 << A2W_PLLH_KP_SHIFT),
 	.set1 = (6 << A2W_PLLH_KP_SHIFT),
 	.mask3 = 0,
 	.mask3 = 0,
 	.set3 = 0,
 	.set3 = 0,
@@ -623,8 +623,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
 		     ~A2W_PLL_CTRL_PWRDN);
 		     ~A2W_PLL_CTRL_PWRDN);
 
 
 	/* Take the PLL out of reset. */
 	/* Take the PLL out of reset. */
+	spin_lock(&cprman->regs_lock);
 	cprman_write(cprman, data->cm_ctrl_reg,
 	cprman_write(cprman, data->cm_ctrl_reg,
 		     cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
 		     cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
+	spin_unlock(&cprman->regs_lock);
 
 
 	/* Wait for the PLL to lock. */
 	/* Wait for the PLL to lock. */
 	timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
 	timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
@@ -701,9 +703,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
 	}
 	}
 
 
 	/* Unmask the reference clock from the oscillator. */
 	/* Unmask the reference clock from the oscillator. */
+	spin_lock(&cprman->regs_lock);
 	cprman_write(cprman, A2W_XOSC_CTRL,
 	cprman_write(cprman, A2W_XOSC_CTRL,
 		     cprman_read(cprman, A2W_XOSC_CTRL) |
 		     cprman_read(cprman, A2W_XOSC_CTRL) |
 		     data->reference_enable_mask);
 		     data->reference_enable_mask);
+	spin_unlock(&cprman->regs_lock);
 
 
 	if (do_ana_setup_first)
 	if (do_ana_setup_first)
 		bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
 		bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
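
The clk-bcm2835 hunks above wrap two read-modify-write sequences on shared
CM/A2W registers in the driver's regs_lock spinlock; without the lock, a
concurrent update to a different bit field of the same register can be lost.
A user-space analogue of the protected update, with a pthread mutex standing
in for the spinlock and a plain variable standing in for the register:

#include <pthread.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int shadow_reg;			/* stand-in for the MMIO register */

static void set_bits(unsigned int bits)
{
	pthread_mutex_lock(&reg_lock);
	shadow_reg |= bits;			/* load-modify-store, now serialised */
	pthread_mutex_unlock(&reg_lock);
}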

+ 17 - 11
drivers/clk/clk-aspeed.c

@@ -205,6 +205,18 @@ static const struct aspeed_clk_soc_data ast2400_data = {
 	.calc_pll = aspeed_ast2400_calc_pll,
 };
 
+static int aspeed_clk_is_enabled(struct clk_hw *hw)
+{
+	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
+	u32 clk = BIT(gate->clock_idx);
+	u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
+	u32 reg;
+
+	regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
+
+	return ((reg & clk) == enval) ? 1 : 0;
+}
+
 static int aspeed_clk_enable(struct clk_hw *hw)
 {
 	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
@@ -215,6 +227,11 @@ static int aspeed_clk_enable(struct clk_hw *hw)
 
 	spin_lock_irqsave(gate->lock, flags);
 
+	if (aspeed_clk_is_enabled(hw)) {
+		spin_unlock_irqrestore(gate->lock, flags);
+		return 0;
+	}
+
 	if (gate->reset_idx >= 0) {
 		/* Put IP in reset */
 		regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst);
@@ -255,17 +272,6 @@ static void aspeed_clk_disable(struct clk_hw *hw)
 	spin_unlock_irqrestore(gate->lock, flags);
 }
 
-static int aspeed_clk_is_enabled(struct clk_hw *hw)
-{
-	struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
-	u32 clk = BIT(gate->clock_idx);
-	u32 reg;
-
-	regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
-
-	return (reg & clk) ? 0 : 1;
-}
-
 static const struct clk_ops aspeed_clk_gate_ops = {
 	.enable = aspeed_clk_enable,
 	.disable = aspeed_clk_disable,
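
The hunks above move aspeed_clk_is_enabled() ahead of aspeed_clk_enable(), teach it to honour the CLK_GATE_SET_TO_DISABLE polarity, and make enable a no-op when the gate is already running, so an already-enabled IP block is not pulsed through reset again. A hedged sketch of that idempotent-enable flow follows; the types and register mirroring are placeholders, only the polarity test and the early return under the lock reflect the change.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Placeholder state; the real driver reads ASPEED_CLK_STOP_CTRL via regmap. */
struct example_gate {
	spinlock_t lock;	/* protects gate and reset state */
	bool set_to_disable;	/* like CLK_GATE_SET_TO_DISABLE */
	u32 stop_ctrl;		/* mirrors the clock-stop register */
	u32 bit;		/* this gate's bit in stop_ctrl */
};

/* "Set to disable" gates run with the bit clear; others run with it set. */
static bool example_gate_is_enabled(struct example_gate *g)
{
	u32 enval = g->set_to_disable ? 0 : g->bit;

	return (g->stop_ctrl & g->bit) == enval;
}

static int example_gate_enable(struct example_gate *g)
{
	unsigned long flags;

	spin_lock_irqsave(&g->lock, flags);

	/* Already running: don't pulse the IP through reset again. */
	if (example_gate_is_enabled(g)) {
		spin_unlock_irqrestore(&g->lock, flags);
		return 0;
	}

	/* ... assert reset, flip the gate bit, deassert reset ... */
	g->stop_ctrl = g->set_to_disable ? (g->stop_ctrl & ~g->bit)
					 : (g->stop_ctrl | g->bit);

	spin_unlock_irqrestore(&g->lock, flags);
	return 0;
}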

+ 28 - 18
drivers/clk/clk.c

@@ -1125,8 +1125,10 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
 {
 	lockdep_assert_held(&prepare_lock);
 
-	if (!core)
+	if (!core) {
+		req->rate = 0;
 		return 0;
+	}
 
 	clk_core_init_rate_req(core, req);
 
@@ -2309,8 +2311,11 @@ static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
 
 	trace_clk_set_phase(core, degrees);
 
-	if (core->ops->set_phase)
+	if (core->ops->set_phase) {
 		ret = core->ops->set_phase(core->hw, degrees);
+		if (!ret)
+			core->phase = degrees;
+	}
 
 	trace_clk_set_phase_complete(core, degrees);
 
@@ -2967,23 +2972,38 @@ static int __clk_core_init(struct clk_core *core)
 		rate = 0;
 	core->rate = core->req_rate = rate;
 
+	/*
+	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
+	 * don't get accidentally disabled when walking the orphan tree and
+	 * reparenting clocks
+	 */
+	if (core->flags & CLK_IS_CRITICAL) {
+		unsigned long flags;
+
+		clk_core_prepare(core);
+
+		flags = clk_enable_lock();
+		clk_core_enable(core);
+		clk_enable_unlock(flags);
+	}
+
 	/*
 	 * walk the list of orphan clocks and reparent any that newly finds a
 	 * parent.
 	 */
 	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
 		struct clk_core *parent = __clk_init_parent(orphan);
-		unsigned long flags;
 
 		/*
-		 * we could call __clk_set_parent, but that would result in a
-		 * redundant call to the .set_rate op, if it exists
+		 * We need to use __clk_set_parent_before() and _after() to
+		 * to properly migrate any prepare/enable count of the orphan
+		 * clock. This is important for CLK_IS_CRITICAL clocks, which
+		 * are enabled during init but might not have a parent yet.
 		 */
 		if (parent) {
 			/* update the clk tree topology */
-			flags = clk_enable_lock();
-			clk_reparent(orphan, parent);
-			clk_enable_unlock(flags);
+			__clk_set_parent_before(orphan, parent);
+			__clk_set_parent_after(orphan, parent, NULL);
 			__clk_recalc_accuracies(orphan);
 			__clk_recalc_rates(orphan, 0);
 		}
@@ -3000,16 +3020,6 @@ static int __clk_core_init(struct clk_core *core)
 	if (core->ops->init)
 		core->ops->init(core->hw);
 
-	if (core->flags & CLK_IS_CRITICAL) {
-		unsigned long flags;
-
-		clk_core_prepare(core);
-
-		flags = clk_enable_lock();
-		clk_core_enable(core);
-		clk_enable_unlock(flags);
-	}
-
 	kref_init(&core->ref);
 out:
 	clk_pm_runtime_put(core);
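
The clk core changes above do three things: clk_core_round_rate_nolock() now reports a rate of 0 for a NULL clock instead of leaving the request untouched, clk_core_set_phase_nolock() caches the phase only when the .set_phase callback succeeds, and CLK_IS_CRITICAL clocks are prepared/enabled before the orphan list is walked, with orphans re-parented via __clk_set_parent_before()/_after() so their prepare/enable counts migrate to the real parent. From a provider's point of view the critical-clock part just means the flag can be relied on from registration onwards; a hedged sketch, with an invented clock name and register, is below.

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>

/*
 * Illustrative provider snippet: marking a gate CLK_IS_CRITICAL makes the
 * core prepare/enable it in __clk_core_init(), and with the change above
 * this happens before orphan clocks are re-parented, so the enable count
 * is carried over once the real parent shows up.
 */
static struct clk_hw *example_register_critical_gate(struct device *dev,
						     void __iomem *reg)
{
	return clk_hw_register_gate(dev, "example-critical-gate",
				    "example-parent", CLK_IS_CRITICAL,
				    reg, /* bit_idx */ 0,
				    /* clk_gate_flags */ 0, /* lock */ NULL);
}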

+ 2 - 0
drivers/clk/hisilicon/clk-hi3660-stub.c

@@ -149,6 +149,8 @@ static int hi3660_stub_clk_probe(struct platform_device *pdev)
 		return PTR_ERR(stub_clk_chan.mbox);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
 	freq_reg = devm_ioremap(dev, res->start, resource_size(res));
 	if (!freq_reg)
 		return -ENOMEM;
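
The fix above guards against platform_get_resource() returning NULL (for example when the device node lacks the expected memory entry), which would otherwise be dereferenced by devm_ioremap(). A minimal sketch of the checked probe pattern, with a placeholder driver name, looks like this:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)	/* no memory resource described for this device */
		return -EINVAL;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* ... use base ... */
	return 0;
}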

+ 17 - 3
drivers/clk/imx/clk-imx51-imx53.c

@@ -131,7 +131,17 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
 static struct clk *clk[IMX5_CLK_END];
 static struct clk_onecell_data clk_data;
 
-static struct clk ** const uart_clks[] __initconst = {
+static struct clk ** const uart_clks_mx51[] __initconst = {
+	&clk[IMX5_CLK_UART1_IPG_GATE],
+	&clk[IMX5_CLK_UART1_PER_GATE],
+	&clk[IMX5_CLK_UART2_IPG_GATE],
+	&clk[IMX5_CLK_UART2_PER_GATE],
+	&clk[IMX5_CLK_UART3_IPG_GATE],
+	&clk[IMX5_CLK_UART3_PER_GATE],
+	NULL
+};
+
+static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
 	&clk[IMX5_CLK_UART1_IPG_GATE],
 	&clk[IMX5_CLK_UART1_PER_GATE],
 	&clk[IMX5_CLK_UART2_IPG_GATE],
@@ -321,8 +331,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
 	clk_prepare_enable(clk[IMX5_CLK_TMAX1]);
 	clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */
 	clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */
-
-	imx_register_uart_clocks(uart_clks);
 }
 
 static void __init mx50_clocks_init(struct device_node *np)
@@ -388,6 +396,8 @@ static void __init mx50_clocks_init(struct device_node *np)
 
 	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
 	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+
+	imx_register_uart_clocks(uart_clks_mx50_mx53);
 }
 CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
 
@@ -477,6 +487,8 @@ static void __init mx51_clocks_init(struct device_node *np)
 	val = readl(MXC_CCM_CLPCR);
 	val |= 1 << 23;
 	writel(val, MXC_CCM_CLPCR);
+
+	imx_register_uart_clocks(uart_clks_mx51);
 }
 CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
 
@@ -606,5 +618,7 @@ static void __init mx53_clocks_init(struct device_node *np)
 
 	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
 	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+
+	imx_register_uart_clocks(uart_clks_mx50_mx53);
 }
 CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
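
The i.MX5 change above splits the single uart_clks[] table into uart_clks_mx51[] and uart_clks_mx50_mx53[] and registers the matching one from each SoC's init path instead of the shared mx5_clocks_common_init(); only the first entries of the mx50/mx53 table are visible in the hunk, which presumably continues with the extra UART gates those SoCs have. The pattern it relies on is a NULL-terminated table handed to imx_register_uart_clocks(); a hedged sketch in the same file's context, with illustrative entries, is:

/* Illustrative per-SoC table; the helper expects a NULL-terminated
 * array of pointers into the SoC's clk[] array. */
static struct clk ** const uart_clks_example[] __initconst = {
	&clk[IMX5_CLK_UART1_IPG_GATE],
	&clk[IMX5_CLK_UART1_PER_GATE],
	NULL
};

static void __init example_soc_clocks_init(struct device_node *np)
{
	/* ... register this SoC's clocks into clk[] ... */
	imx_register_uart_clocks(uart_clks_example);
}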

+ 2 - 3
drivers/clk/qcom/apcs-msm8916.c

@@ -49,11 +49,10 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
 	struct clk_regmap_mux_div *a53cc;
 	struct regmap *regmap;
 	struct clk_init_data init = { };
-	int ret;
+	int ret = -ENODEV;
 
 	regmap = dev_get_regmap(parent, NULL);
-	if (IS_ERR(regmap)) {
-		ret = PTR_ERR(regmap);
+	if (!regmap) {
 		dev_err(dev, "failed to get regmap: %d\n", ret);
 		return ret;
 	}
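
The qcom APCS fix matters because dev_get_regmap() signals failure by returning NULL, not an ERR_PTR()-encoded error, so the old IS_ERR()/PTR_ERR() pair could never catch the failure; ret is now pre-initialised to -ENODEV and the pointer is NULL-checked. A short sketch of the corrected lookup, with an invented helper name, is:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/regmap.h>

static int example_get_parent_regmap(struct device *parent,
				     struct regmap **out)
{
	struct regmap *regmap;

	regmap = dev_get_regmap(parent, NULL);
	if (!regmap)	/* NULL, never an ERR_PTR(), on failure */
		return -ENODEV;

	*out = regmap;
	return 0;
}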

+ 3 - 3
drivers/clk/sunxi-ng/ccu-sun6i-a31.c

@@ -762,7 +762,7 @@ static struct ccu_mp out_a_clk = {
 		.features	= CCU_FEATURE_FIXED_PREDIV,
 		.hw.init	= CLK_HW_INIT_PARENTS("out-a",
 						      clk_out_parents,
-						      &ccu_div_ops,
+						      &ccu_mp_ops,
 						      0),
 	},
 };
@@ -783,7 +783,7 @@ static struct ccu_mp out_b_clk = {
 		.features	= CCU_FEATURE_FIXED_PREDIV,
 		.hw.init	= CLK_HW_INIT_PARENTS("out-b",
 						      clk_out_parents,
-						      &ccu_div_ops,
+						      &ccu_mp_ops,
 						      0),
 	},
 };
@@ -804,7 +804,7 @@ static struct ccu_mp out_c_clk = {
 		.features	= CCU_FEATURE_FIXED_PREDIV,
 		.hw.init	= CLK_HW_INIT_PARENTS("out-c",
 						      clk_out_parents,
-						      &ccu_div_ops,
+						      &ccu_mp_ops,
 						      0),
 	},
 };

+ 1 - 1
drivers/clk/ti/clk-33xx.c

@@ -45,7 +45,7 @@ static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = {
 
 static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = {
 	{ AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
-	{ AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP, "lcd_gclk", "lcdc_clkdm" },
+	{ AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" },
 	{ AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" },
 	{ AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
 	{ AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" },

+ 1 - 1
drivers/clk/ti/clk-43xx.c

@@ -187,7 +187,7 @@ static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst
 	{ AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
 	{ AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
 	{ AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" },
-	{ AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "disp_clk", "dss_clkdm" },
+	{ AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" },
 	{ AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
 	{ 0 },
 };

+ 2 - 0
drivers/clk/ti/clkctrl.c

@@ -537,6 +537,8 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 		init.parent_names = &reg_data->parent;
 		init.num_parents = 1;
 		init.flags = 0;
+		if (reg_data->flags & CLKF_SET_RATE_PARENT)
+			init.flags |= CLK_SET_RATE_PARENT;
 		init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d",
 				      node->parent->name, node->name,
 				      reg_data->offset, 0);
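
The three TI changes work together: the am33xx LCDC and am43xx DSS clkctrl entries gain CLKF_SET_RATE_PARENT, and _ti_omap4_clkctrl_setup() translates that data flag into the framework's CLK_SET_RATE_PARENT when registering the clock, so a rate request on the clkctrl leaf is forwarded to its parent ("lcd_gclk"/"disp_clk"). From the consumer side the effect looks like the hedged sketch below; the clock id and driver context are invented.

#include <linux/clk.h>
#include <linux/device.h>

static int example_set_pixel_clock(struct device *dev, unsigned long rate)
{
	struct clk *fck;

	fck = devm_clk_get(dev, "fck");	/* the clkctrl leaf clock */
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	/*
	 * With CLK_SET_RATE_PARENT set on the leaf, this request is
	 * propagated up to the parent that can actually change rate,
	 * instead of leaving the rate unchanged at the leaf.
	 */
	return clk_set_rate(fck, rate);
}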

+ 1 - 0
drivers/clocksource/Kconfig

@@ -386,6 +386,7 @@ config ATMEL_PIT
 
 config ATMEL_ST
 	bool "Atmel ST timer support" if COMPILE_TEST
+	depends on HAS_IOMEM
 	select TIMER_OF
 	select MFD_SYSCON
 	help

Too many files changed in this diff, so some files are not shown.