
Merge tag 'v4.10-rc8' into drm-next

Linux 4.10-rc8

Backmerge Linus rc8 to fix some conflicts, but also
to avoid pulling it in via a fixes pull from someone.
Dave Airlie, 8 years ago
Commit 94000cc329

100 files changed, 699 insertions(+), 539 deletions(-)
  1. 3 0
      Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
  2. 1 1
      Documentation/devicetree/bindings/net/mediatek-net.txt
  3. 3 2
      Documentation/devicetree/bindings/net/phy.txt
  4. 0 5
      Documentation/media/uapi/cec/cec-func-close.rst
  5. 0 5
      Documentation/media/uapi/cec/cec-func-ioctl.rst
  6. 0 5
      Documentation/media/uapi/cec/cec-func-open.rst
  7. 0 5
      Documentation/media/uapi/cec/cec-func-poll.rst
  8. 12 5
      Documentation/media/uapi/cec/cec-intro.rst
  9. 0 5
      Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
  10. 0 5
      Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
  11. 0 5
      Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
  12. 0 5
      Documentation/media/uapi/cec/cec-ioc-dqevent.rst
  13. 0 5
      Documentation/media/uapi/cec/cec-ioc-g-mode.rst
  14. 0 5
      Documentation/media/uapi/cec/cec-ioc-receive.rst
  15. 1 3
      Documentation/power/states.txt
  16. 21 22
      MAINTAINERS
  17. 3 3
      Makefile
  18. 3 1
      arch/arc/include/asm/delay.h
  19. 7 7
      arch/arc/kernel/head.S
  20. 23 32
      arch/arc/kernel/mcip.c
  21. 20 5
      arch/arc/kernel/smp.c
  22. 2 1
      arch/arc/kernel/unaligned.c
  23. 1 1
      arch/arm/boot/dts/Makefile
  24. 8 0
      arch/arm/boot/dts/imx1.dtsi
  25. 8 0
      arch/arm/boot/dts/imx23.dtsi
  26. 8 0
      arch/arm/boot/dts/imx25.dtsi
  27. 8 0
      arch/arm/boot/dts/imx27.dtsi
  28. 8 0
      arch/arm/boot/dts/imx28.dtsi
  29. 8 0
      arch/arm/boot/dts/imx31.dtsi
  30. 8 0
      arch/arm/boot/dts/imx35.dtsi
  31. 8 0
      arch/arm/boot/dts/imx50.dtsi
  32. 8 0
      arch/arm/boot/dts/imx51.dtsi
  33. 8 0
      arch/arm/boot/dts/imx53.dtsi
  34. 1 1
      arch/arm/boot/dts/imx6dl.dtsi
  35. 8 0
      arch/arm/boot/dts/imx6qdl.dtsi
  36. 8 0
      arch/arm/boot/dts/imx6sl.dtsi
  37. 8 0
      arch/arm/boot/dts/imx6sx.dtsi
  38. 8 0
      arch/arm/boot/dts/imx6ul.dtsi
  39. 8 0
      arch/arm/boot/dts/imx7s.dtsi
  40. 2 2
      arch/arm/boot/dts/orion5x-linkstation-lschl.dts
  41. 1 0
      arch/arm/boot/dts/stih407-family.dtsi
  42. 2 2
      arch/arm/configs/ezx_defconfig
  43. 2 2
      arch/arm/configs/imote2_defconfig
  44. 1 1
      arch/arm/kernel/ptrace.c
  45. 1 1
      arch/arm/mach-imx/mmdc.c
  46. 2 2
      arch/arm/mm/fault.c
  47. 4 0
      arch/arm/mm/fault.h
  48. 18 0
      arch/arm64/boot/dts/amlogic/meson-gx.dtsi
  49. 12 0
      arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
  50. 42 46
      arch/arm64/crypto/aes-modes.S
  51. 7 1
      arch/arm64/kernel/topology.c
  52. 7 1
      arch/parisc/include/asm/bitops.h
  53. 0 2
      arch/parisc/include/uapi/asm/bitsperlong.h
  54. 3 2
      arch/parisc/include/uapi/asm/swab.h
  55. 1 1
      arch/powerpc/Kconfig
  56. 2 0
      arch/powerpc/include/asm/cpu_has_feature.h
  57. 2 0
      arch/powerpc/include/asm/mmu.h
  58. 0 4
      arch/powerpc/include/asm/module.h
  59. 2 1
      arch/powerpc/include/asm/reg.h
  60. 0 40
      arch/powerpc/include/asm/stackprotector.h
  61. 1 0
      arch/powerpc/include/asm/xics.h
  62. 0 4
      arch/powerpc/kernel/Makefile
  63. 0 3
      arch/powerpc/kernel/asm-offsets.c
  64. 1 1
      arch/powerpc/kernel/eeh_driver.c
  65. 1 5
      arch/powerpc/kernel/entry_32.S
  66. 0 8
      arch/powerpc/kernel/module_64.c
  67. 0 6
      arch/powerpc/kernel/process.c
  68. 3 0
      arch/powerpc/kernel/prom_init.c
  69. 5 16
      arch/powerpc/mm/fault.c
  70. 2 2
      arch/powerpc/mm/pgtable-radix.c
  71. 1 5
      arch/powerpc/mm/tlb-radix.c
  72. 10 2
      arch/powerpc/platforms/powernv/smp.c
  73. 33 2
      arch/powerpc/sysdev/xics/icp-opal.c
  74. 8 0
      arch/s390/kernel/ptrace.c
  75. 4 3
      arch/s390/mm/pgtable.c
  76. 4 4
      arch/sparc/include/asm/mmu_context_64.h
  77. 1 1
      arch/sparc/kernel/irq_64.c
  78. 3 3
      arch/sparc/kernel/sstate.c
  79. 73 0
      arch/sparc/kernel/traps_64.c
  80. 4 4
      arch/x86/crypto/aesni-intel_glue.c
  81. 26 34
      arch/x86/events/intel/rapl.c
  82. 91 141
      arch/x86/events/intel/uncore.c
  83. 1 0
      arch/x86/include/asm/microcode.h
  84. 1 0
      arch/x86/include/asm/processor.h
  85. 2 2
      arch/x86/kernel/apic/io_apic.c
  86. 15 1
      arch/x86/kernel/cpu/amd.c
  87. 1 0
      arch/x86/kernel/cpu/common.c
  88. 12 19
      arch/x86/kernel/cpu/mcheck/mce.c
  89. 3 2
      arch/x86/kernel/cpu/microcode/amd.c
  90. 17 5
      arch/x86/kernel/cpu/microcode/core.c
  91. 1 8
      arch/x86/kernel/cpu/microcode/intel.c
  92. 3 1
      arch/x86/kernel/fpu/core.c
  93. 1 0
      arch/x86/kernel/hpet.c
  94. 9 3
      arch/x86/kernel/smpboot.c
  95. 3 2
      arch/x86/kernel/tsc.c
  96. 7 9
      arch/x86/kernel/tsc_sync.c
  97. 1 0
      arch/x86/kvm/x86.c
  98. 2 0
      arch/x86/mm/dump_pagetables.c
  99. 16 0
      arch/x86/platform/efi/efi_64.c
  100. 1 1
      arch/xtensa/kernel/setup.c

+ 3 - 0
Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt

@@ -15,6 +15,9 @@ Properties:
   Second cell specifies the irq distribution mode to cores
      0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3

+  The second cell in interrupts property is deprecated and may be ignored by
+  the kernel.
+
   intc accessed via the special ARC AUX register interface, hence "reg" property
   is not specified.

+ 1 - 1
Documentation/devicetree/bindings/net/mediatek-net.txt

@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
 * Ethernet controller node

 Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the three frame engines interrupts in numeric
 	order. These are fe_int0, fe_int1 and fe_int2.

+ 3 - 2
Documentation/devicetree/bindings/net/phy.txt

@@ -19,8 +19,9 @@ Optional Properties:
   specifications. If neither of these are specified, the default is to
   assume clause 22.

-  If the phy's identifier is known then the list may contain an entry
-  of the form: "ethernet-phy-idAAAA.BBBB" where
+  If the PHY reports an incorrect ID (or none at all) then the
+  "compatible" list may contain an entry with the correct PHY ID in the
+  form: "ethernet-phy-idAAAA.BBBB" where
      AAAA - The value of the 16 bit Phy Identifier 1 register as
             4 hex digits. This is the chip vendor OUI bits 3:18
      BBBB - The value of the 16 bit Phy Identifier 2 register as

+ 0 - 5
Documentation/media/uapi/cec/cec-func-close.rst

@@ -33,11 +33,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 Closes the cec device. Resources associated with the file descriptor are
 freed. The device configuration remain unchanged.
 
 

+ 0 - 5
Documentation/media/uapi/cec/cec-func-ioctl.rst

@@ -39,11 +39,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 The :c:func:`ioctl()` function manipulates cec device parameters. The
 argument ``fd`` must be an open file descriptor.
 
 

+ 0 - 5
Documentation/media/uapi/cec/cec-func-open.rst

@@ -46,11 +46,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To open a cec device applications call :c:func:`open()` with the
 desired device name. The function has no side effects; the device
 configuration remain unchanged.

+ 0 - 5
Documentation/media/uapi/cec/cec-func-poll.rst

@@ -39,11 +39,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 With the :c:func:`poll()` function applications can wait for CEC
 events.
 
 

+ 12 - 5
Documentation/media/uapi/cec/cec-intro.rst

@@ -3,11 +3,6 @@
 Introduction
 ============

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 HDMI connectors provide a single pin for use by the Consumer Electronics
 Control protocol. This protocol allows different devices connected by an
 HDMI cable to communicate. The protocol for CEC version 1.4 is defined
@@ -31,3 +26,15 @@ control just the CEC pin.
 Drivers that support CEC will create a CEC device node (/dev/cecX) to
 give userspace access to the CEC adapter. The
 :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
+
+In order to check the support and test it, it is suggested to download
+the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
+provides three tools to handle CEC:
+
+- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
+  and monitor CEC messages.
+
+- cec-compliance: does a CEC compliance test of a remote CEC device to
+  determine how compliant the CEC implementation is.
+
+- cec-follower: emulates a CEC follower.
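
As a rough illustration of the userspace side described above (this sketch is not part of the patch set, and the device path /dev/cec0 is an assumption), querying an adapter's capabilities through the documented CEC_ADAP_G_CAPS ioctl looks roughly like this:

/* Hedged sketch, not from this commit: open a CEC device node and query
 * its capabilities with the CEC_ADAP_G_CAPS ioctl documented above.
 * "/dev/cec0" is an assumed device path for the example. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cec.h>

int main(void)
{
	struct cec_caps caps = { };
	int fd = open("/dev/cec0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/cec0");
		return 1;
	}
	if (ioctl(fd, CEC_ADAP_G_CAPS, &caps) < 0) {
		perror("CEC_ADAP_G_CAPS");
		close(fd);
		return 1;
	}
	/* The driver fills in the driver/adapter names and a capability mask. */
	printf("driver: %s, adapter: %s, caps: 0x%x\n",
	       caps.driver, caps.name, caps.capabilities);
	close(fd);
	return 0;
}

The cec-ctl tool from v4l-utils exercises the same ioctl when reporting adapter capabilities.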

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst

@@ -29,11 +29,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
 device information, applications call the ioctl with a pointer to a
 struct :c:type:`cec_caps`. The driver fills the structure and

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst

@@ -35,11 +35,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
 struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst

@@ -35,11 +35,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current physical address applications call
 :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
 driver stores the physical address.

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-dqevent.rst

@@ -30,11 +30,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 CEC devices can send asynchronous events. These can be retrieved by
 calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
 non-blocking mode and no event is pending, then it will return -1 and

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-g-mode.rst

@@ -31,11 +31,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
 applications from stepping on each others toes it must be possible to
 obtain exclusive access to the CEC adapter. This ioctl sets the

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-receive.rst

@@ -34,11 +34,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To receive a CEC message the application has to fill in the
 ``timeout`` field of struct :c:type:`cec_msg` and pass it to
 :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.

+ 1 - 3
Documentation/power/states.txt

@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line.  On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.

 The properties of all of the sleep states are described below.
 
 

+ 21 - 22
MAINTAINERS

@@ -1091,7 +1091,7 @@ F:	arch/arm/boot/dts/aspeed-*
 F:	drivers/*/*aspeed*

 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 M:	Alexandre Belloni <alexandre.belloni@free-electrons.com>
 M:	Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1773,7 +1773,7 @@ F:	drivers/soc/renesas/
 F:	include/linux/soc/renesas/

 ARM/SOCFPGA ARCHITECTURE
-M:	Dinh Nguyen <dinguyen@opensource.altera.com>
+M:	Dinh Nguyen <dinguyen@kernel.org>
 S:	Maintained
 F:	arch/arm/mach-socfpga/
 F:	arch/arm/boot/dts/socfpga*
@@ -1783,7 +1783,7 @@ W:	http://www.rocketboards.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git

 ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
-M:	Dinh Nguyen <dinguyen@opensource.altera.com>
+M:	Dinh Nguyen <dinguyen@kernel.org>
 S:	Maintained
 F:	drivers/clk/socfpga/

@@ -2175,56 +2175,56 @@ F:	include/linux/atm*
 F:	include/uapi/linux/atm*

 ATMEL AT91 / AT32 MCI DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 S:	Maintained
 F:	drivers/mmc/host/atmel-mci.c

 ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 S:	Supported
 F:	drivers/power/reset/at91-sama5d2_shdwc.c

 ATMEL SAMA5D2 ADC DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-iio@vger.kernel.org
 S:	Supported
 F:	drivers/iio/adc/at91-sama5d2_adc.c

 ATMEL Audio ALSA driver
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 F:	sound/soc/atmel

 ATMEL XDMA DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org
 L:	dmaengine@vger.kernel.org
 S:	Supported
 F:	drivers/dma/at_xdmac.c

 ATMEL I2C DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-i2c@vger.kernel.org
 S:	Supported
 F:	drivers/i2c/busses/i2c-at91.c

 ATMEL ISI DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-media@vger.kernel.org
 S:	Supported
 F:	drivers/media/platform/soc_camera/atmel-isi.c
 F:	include/media/atmel-isi.h

 ATMEL LCDFB DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/fbdev/atmel_lcdfb.c
 F:	include/video/atmel_lcdc.h

 ATMEL MACB ETHERNET DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 S:	Supported
 F:	drivers/net/ethernet/cadence/

@@ -2236,32 +2236,32 @@ S:	Supported
 F:	drivers/mtd/nand/atmel_nand*

 ATMEL SDMMC DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-mmc@vger.kernel.org
 S:	Supported
 F:	drivers/mmc/host/sdhci-of-at91.c

 ATMEL SPI DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 S:	Supported
 F:	drivers/spi/spi-atmel.*

 ATMEL SSC DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	drivers/misc/atmel-ssc.c
 F:	include/linux/atmel-ssc.h

 ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	drivers/misc/atmel_tclib.c
 F:	drivers/clocksource/tcb_clksrc.c

 ATMEL USBA UDC DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	drivers/usb/gadget/udc/atmel_usba_udc.*
@@ -3567,7 +3567,7 @@ F:	drivers/infiniband/hw/cxgb3/
 F:	include/uapi/rdma/cxgb3-abi.h

 CXGB4 ETHERNET DRIVER (CXGB4)
-M:	Hariprasad S <hariprasad@chelsio.com>
+M:	Ganesh Goudar <ganeshgr@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported
@@ -4154,7 +4154,7 @@ F:	Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:      Zhenyu Wang <zhenyuw@linux.intel.com>
 M:      Zhi Wang <zhi.a.wang@intel.com>
-L:      igvt-g-dev@lists.01.org
+L:      intel-gvt-dev@lists.freedesktop.org
 L:      intel-gfx@lists.freedesktop.org
 W:      https://01.org/igvt-g
 T:      git https://github.com/01org/gvt-linux.git
@@ -9743,7 +9743,7 @@ S:	Maintained
 F:	drivers/pinctrl/pinctrl-at91.*

 PIN CONTROLLER - ATMEL AT91 PIO4
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-gpio@vger.kernel.org
 S:	Supported
@@ -10202,7 +10202,6 @@ F:	drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:	QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
-L:	ath9k-devel@lists.ath9k.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 S:	Supported
 F:	drivers/net/wireless/ath/ath9k/
@@ -13073,7 +13072,7 @@ F:	drivers/input/serio/userio.c
 F:	include/uapi/linux/userio.h

 VIRTIO CONSOLE DRIVER
-M:	Amit Shah <amit.shah@redhat.com>
+M:	Amit Shah <amit@kernel.org>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/char/virtio_console.c

+ 3 - 3
Makefile

@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Anniversary Edition
+EXTRAVERSION = -rc8
+NAME = Fearless Coyote

 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -797,7 +797,7 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)

 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif

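The Makefile hunk above now hands $(KBUILD_CFLAGS) to the 'asm goto' probe so the test compiler invocation matches the real build. For orientation only — this is an assumption about the shape of the probe, not the contents of scripts/gcc-goto.sh — the construct the compiler has to accept looks roughly like:

/* Hedged sketch: a compiler that accepts this lets the kernel define
 * CC_HAVE_ASM_GOTO. Not copied from scripts/gcc-goto.sh. */
int main(void)
{
	/* "asm goto" allows inline assembly to branch to a C label. */
	asm goto("" : : : : out);
	return 1;
out:
	return 0;
}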
+ 3 - 1
arch/arc/include/asm/delay.h

@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
 	"	lp  1f			\n"
 	"	nop			\n"
 	"1:				\n"
-	: : "r"(loops));
+	:
+        : "r"(loops)
+        : "lp_count");
 }

 extern void __bad_udelay(void);

+ 7 - 7
arch/arc/kernel/head.S

@@ -71,14 +71,14 @@ ENTRY(stext)
 	GET_CPU_ID  r5
 	cmp	r5, 0
 	mov.nz	r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-	; Non-Master can proceed as system would be booted sufficiently
-	jnz	first_lines_of_secondary
-#else
+	bz	.Lmaster_proceed
+
 	; Non-Masters wait for Master to boot enough and bring them up
-	jnz	arc_platform_smp_wait_to_boot
-#endif
-	; Master falls thru
+	; when they resume, tail-call to entry point
+	mov	blink, @first_lines_of_secondary
+	j	arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif

 	; Clear BSS before updating any globals

+ 23 - 32
arch/arc/kernel/mcip.c

@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)
 	READ_BCR(ARC_REG_MCIP_BCR, mp);

 	sprintf(smp_cpuinfo_buf,
-		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
 		mp.ver, mp.num_cores,
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
-		IS_AVAIL1(mp.llm, "LLM "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.gfrc, "GFRC"));

@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }

-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,

 	return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+	/*
+	 * By default send all common interrupts to all available online CPUs.
+	 * The affinity of common interrupts in IDU must be set manually since
+	 * in some cases the kernel will not call irq_set_affinity() by itself:
+	 *   1. When the kernel is not configured with support of SMP.
+	 *   2. When the kernel is configured with support of SMP but upper
+	 *      interrupt controllers does not support setting of the affinity
+	 *      and cannot propagate it to IDU.
+	 */
+	idu_irq_set_affinity(data, cpu_online_mask, false);
+	idu_irq_unmask(data);
+}

 static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_enable		= idu_irq_enable,
 #ifdef CONFIG_SMP
 	.irq_set_affinity       = idu_irq_set_affinity,
 #endif
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
 			 const u32 *intspec, unsigned int intsize,
 			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-	int distri = intspec[1];
-	unsigned long flags;
-
+	/*
+	 * Ignore value of interrupt distribution mode for common interrupts in
+	 * IDU which resides in intspec[1] since setting an affinity using value
+	 * from Device Tree is deprecated in ARC.
+	 */
+	*out_hwirq = intspec[0];
 	*out_type = IRQ_TYPE_NONE;

-	/* XXX: validate distribution scheme again online cpu mask */
-	if (distri == 0) {
-		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	} else {
-		/*
-		 * DEST based distribution for Level Triggered intr can only
-		 * have 1 CPU, so generalize it to always contain 1 cpu
-		 */
-		int cpu = ffs(distri);
-
-		if (cpu != fls(distri))
-			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-				hwirq, cpu);
-
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, cpu);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	}
-
 	return 0;
 }
 
 

+ 20 - 5
arch/arc/kernel/smp.c

@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;

+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)		f
+#define __boot_write(f, v)	f = v
+
+#else
+
+#define __boot_read(f)		arc_read_uncached_32(&f)
+#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
 	BUG_ON(cpu == 0);
-	wake_flag = cpu;
+
+	__boot_write(wake_flag, cpu);
 }

 void arc_platform_smp_wait_to_boot(int cpu)
 {
-	while (wake_flag != cpu)
+	/* for halt-on-reset, we've waited already */
+	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+		return;
+
+	while (__boot_read(wake_flag) != cpu)
 		;

-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
+	__boot_write(wake_flag, 0);
 }

-
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";

+ 2 - 1
arch/arc/kernel/unaligned.c

@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 	if (state.fault)
 		goto fault;

+	/* clear any remanants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;

+ 1 - 1
arch/arm/boot/dts/Makefile

@@ -617,7 +617,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
 	orion5x-lacie-ethernet-disk-mini-v2.dtb \
 	orion5x-linkstation-lsgl.dtb \
 	orion5x-linkstation-lswtgl.dtb \
-	orion5x-lschl.dtb \
+	orion5x-linkstation-lschl.dtb \
 	orion5x-lswsgl.dtb \
 	orion5x-maxtor-shared-storage-2.dtb \
 	orion5x-netgear-wnr854t.dtb \

+ 8 - 0
arch/arm/boot/dts/imx1.dtsi

@@ -18,6 +18,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		gpio0 = &gpio1;

+ 8 - 0
arch/arm/boot/dts/imx23.dtsi

@@ -16,6 +16,14 @@
 	#size-cells = <1>;

 	interrupt-parent = <&icoll>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		gpio0 = &gpio0;

+ 8 - 0
arch/arm/boot/dts/imx25.dtsi

@@ -14,6 +14,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &fec;

+ 8 - 0
arch/arm/boot/dts/imx27.dtsi

@@ -19,6 +19,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &fec;

+ 8 - 0
arch/arm/boot/dts/imx28.dtsi

@@ -17,6 +17,14 @@
 	#size-cells = <1>;

 	interrupt-parent = <&icoll>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &mac0;

+ 8 - 0
arch/arm/boot/dts/imx31.dtsi

@@ -12,6 +12,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		serial0 = &uart1;

+ 8 - 0
arch/arm/boot/dts/imx35.dtsi

@@ -13,6 +13,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &fec;

+ 8 - 0
arch/arm/boot/dts/imx50.dtsi

@@ -17,6 +17,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &fec;

+ 8 - 0
arch/arm/boot/dts/imx51.dtsi

@@ -19,6 +19,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &fec;

+ 8 - 0
arch/arm/boot/dts/imx53.dtsi

@@ -19,6 +19,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &fec;

+ 1 - 1
arch/arm/boot/dts/imx6dl.dtsi

@@ -137,7 +137,7 @@
 &gpio4 {
 	gpio-ranges = <&iomuxc  5 136 1>, <&iomuxc  6 145 1>, <&iomuxc  7 150 1>,
 		      <&iomuxc  8 146 1>, <&iomuxc  9 151 1>, <&iomuxc 10 147 1>,
-		      <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
+		      <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
 		      <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16  39 7>,
 		      <&iomuxc 23  56 1>, <&iomuxc 24  61 7>, <&iomuxc 31  46 1>;
 };

+ 8 - 0
arch/arm/boot/dts/imx6qdl.dtsi

@@ -16,6 +16,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &fec;

+ 8 - 0
arch/arm/boot/dts/imx6sl.dtsi

@@ -14,6 +14,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &fec;

+ 8 - 0
arch/arm/boot/dts/imx6sx.dtsi

@@ -15,6 +15,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		can0 = &flexcan1;

+ 8 - 0
arch/arm/boot/dts/imx6ul.dtsi

@@ -15,6 +15,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		ethernet0 = &fec1;

+ 8 - 0
arch/arm/boot/dts/imx7s.dtsi

@@ -50,6 +50,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };

 	aliases {
 		gpio0 = &gpio1;

+ 2 - 2
arch/arm/boot/dts/orion5x-lschl.dts → arch/arm/boot/dts/orion5x-linkstation-lschl.dts

@@ -2,7 +2,7 @@
  * Device Tree file for Buffalo Linkstation LS-CHLv3
  *
  * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk>
- * Copyright (C) 2015, 2016
+ * Copyright (C) 2015-2017
  * Roger Shimizu <rogershimizu@gmail.com>
  *
  * This file is dual-licensed: you can use it either under the terms
@@ -52,7 +52,7 @@
 #include <dt-bindings/gpio/gpio.h>

 / {
-	model = "Buffalo Linkstation Live v3 (LS-CHL)";
+	model = "Buffalo Linkstation LiveV3 (LS-CHL)";
 	compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x";

 	memory { /* 128 MB */

+ 1 - 0
arch/arm/boot/dts/stih407-family.dtsi

@@ -680,6 +680,7 @@
 				phy-names	= "usb2-phy", "usb3-phy";
 				phys		= <&usb2_picophy0>,
 						  <&phy_port2 PHY_TYPE_USB3>;
+				snps,dis_u3_susphy_quirk;
 			};
 		};
 
 

+ 2 - 2
arch/arm/configs/ezx_defconfig

@@ -64,8 +64,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m

+ 2 - 2
arch/arm/configs/imote2_defconfig

@@ -56,8 +56,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m

+ 1 - 1
arch/arm/kernel/ptrace.c

@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
 		   const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	struct pt_regs newregs;
+	struct pt_regs newregs = *task_pt_regs(target);

 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &newregs,

+ 1 - 1
arch/arm/mach-imx/mmdc.c

@@ -60,7 +60,6 @@
 
 
 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

-static enum cpuhp_state cpuhp_mmdc_state;
 static int ddr_type;

 struct fsl_mmdc_devtype_data {
@@ -82,6 +81,7 @@ static const struct of_device_id imx_mmdc_dt_ids[] = {

 #ifdef CONFIG_PERF_EVENTS

+static enum cpuhp_state cpuhp_mmdc_state;
 static DEFINE_IDA(mmdc_ida);

 PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")

+ 2 - 2
arch/arm/mm/fault.c

@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
 
 
 void __init early_abt_enable(void)
 {
-	fsr_info[22].fn = early_abort_handler;
+	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
 	local_abt_enable();
-	fsr_info[22].fn = do_bad;
+	fsr_info[FSR_FS_AEA].fn = do_bad;
 }

 #ifndef CONFIG_ARM_LPAE

+ 4 - 0
arch/arm/mm/fault.h

@@ -11,11 +11,15 @@
 #define FSR_FS5_0		(0x3f)

 #ifdef CONFIG_ARM_LPAE
+#define FSR_FS_AEA		17
+
 static inline int fsr_fs(unsigned int fsr)
 {
 	return fsr & FSR_FS5_0;
 }
 #else
+#define FSR_FS_AEA		22
+
 static inline int fsr_fs(unsigned int fsr)
 {
 	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;

+ 18 - 0
arch/arm64/boot/dts/amlogic/meson-gx.dtsi

@@ -55,6 +55,24 @@
 	#address-cells = <2>;
 	#size-cells = <2>;

+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* 16 MiB reserved for Hardware ROM Firmware */
+		hwrom_reserved: hwrom@0 {
+			reg = <0x0 0x0 0x0 0x1000000>;
+			no-map;
+		};
+
+		/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+		secmon_reserved: secmon@10000000 {
+			reg = <0x0 0x10000000 0x0 0x200000>;
+			no-map;
+		};
+	};
+
 	cpus {
 		#address-cells = <0x2>;
 		#size-cells = <0x0>;

+ 12 - 0
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts

@@ -151,6 +151,18 @@
 	status = "okay";
 	pinctrl-0 = <&eth_rgmii_pins>;
 	pinctrl-names = "default";
+	phy-handle = <&eth_phy0>;
+
+	mdio {
+		compatible = "snps,dwmac-mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		eth_phy0: ethernet-phy@0 {
+			reg = <0>;
+			eee-broken-1000t;
+		};
+	};
 };

 &ir {

+ 42 - 46
arch/arm64/crypto/aes-modes.S

@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 	cbz		w6, .Lcbcencloop

 	ld1		{v0.16b}, [x5]			/* get iv */
-	enc_prepare	w3, x2, x5
+	enc_prepare	w3, x2, x6

 .Lcbcencloop:
 	ld1		{v1.16b}, [x1], #16		/* get next pt block */
 	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
-	encrypt_block	v0, w3, x2, x5, w6
+	encrypt_block	v0, w3, x2, x6, w7
 	st1		{v0.16b}, [x0], #16
 	subs		w4, w4, #1
 	bne		.Lcbcencloop
+	st1		{v0.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_encrypt)

@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	cbz		w6, .LcbcdecloopNx

 	ld1		{v7.16b}, [x5]			/* get iv */
-	dec_prepare	w3, x2, x5
+	dec_prepare	w3, x2, x6

 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 	ld1		{v1.16b}, [x1], #16		/* get next ct block */
 	mov		v0.16b, v1.16b			/* ...and copy to v0 */
-	decrypt_block	v0, w3, x2, x5, w6
+	decrypt_block	v0, w3, x2, x6, w7
 	eor		v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
 	mov		v7.16b, v1.16b			/* ct is next iv */
 	st1		{v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	bne		.Lcbcdecloop
 .Lcbcdecout:
 	FRAME_POP
+	st1		{v7.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_decrypt)

@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)

 AES_ENTRY(aes_ctr_encrypt)
 	FRAME_PUSH
-	cbnz		w6, .Lctrfirst		/* 1st time around? */
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
-#if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
-	bcs		.Lctrinc
-	add		x5, x5, #1		/* increment BE ctr */
-	b		.LctrincNx
-#else
-	b		.Lctrinc
-#endif
-.Lctrfirst:
+	cbz		w6, .Lctrnotfirst	/* 1st time around? */
 	enc_prepare	w3, x2, x6
 	ld1		{v4.16b}, [x5]
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
+
+.Lctrnotfirst:
+	umov		x8, v4.d[1]		/* keep swabbed ctr in reg */
+	rev		x8, x8
 #if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
+	cmn		w8, w4			/* 32 bit overflow? */
 	bcs		.Lctrloop
 .LctrloopNx:
 	subs		w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
 	mov		v0.8b, v4.8b
 	mov		v1.8b, v4.8b
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v0.d[1], x7
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v1.d[1], x7
 	ld1		{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
 	do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
 	st1		{v0.16b-v1.16b}, [x0], #32
 #else
 	ldr		q8, =0x30000000200000001	/* addends 1,2,3[,0] */
-	dup		v7.4s, w5
+	dup		v7.4s, w8
 	mov		v0.16b, v4.16b
 	add		v7.4s, v7.4s, v8.4s
 	mov		v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
 	eor		v2.16b, v7.16b, v2.16b
 	eor		v3.16b, v5.16b, v3.16b
 	st1		{v0.16b-v3.16b}, [x0], #64
-	add		x5, x5, #INTERLEAVE
+	add		x8, x8, #INTERLEAVE
 #endif
-	cbz		w4, .LctroutNx
-.LctrincNx:
-	rev		x7, x5
+	rev		x7, x8
 	ins		v4.d[1], x7
+	cbz		w4, .Lctrout
 	b		.LctrloopNx
-.LctroutNx:
-	sub		x5, x5, #1
-	rev		x7, x5
-	ins		v4.d[1], x7
-	b		.Lctrout
 .Lctr1x:
 	adds		w4, w4, #INTERLEAVE
 	beq		.Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
 	mov		v0.16b, v4.16b
 	encrypt_block	v0, w3, x2, x6, w7
+
+	adds		x8, x8, #1		/* increment BE ctr */
+	rev		x7, x8
+	ins		v4.d[1], x7
+	bcs		.Lctrcarry		/* overflow? */
+
+.Lctrcarrydone:
 	subs		w4, w4, #1
 	bmi		.Lctrhalfblock		/* blocks < 0 means 1/2 block */
 	ld1		{v3.16b}, [x1], #16
 	eor		v3.16b, v0.16b, v3.16b
 	st1		{v3.16b}, [x0], #16
-	beq		.Lctrout
-.Lctrinc:
-	adds		x5, x5, #1		/* increment BE ctr */
-	rev		x7, x5
-	ins		v4.d[1], x7
-	bcc		.Lctrloop		/* no overflow? */
-	umov		x7, v4.d[0]		/* load upper word of ctr  */
-	rev		x7, x7			/* ... to handle the carry */
-	add		x7, x7, #1
-	rev		x7, x7
-	ins		v4.d[0], x7
-	b		.Lctrloop
+	bne		.Lctrloop
+
+.Lctrout:
+	st1		{v4.16b}, [x5]		/* return next CTR value */
+	FRAME_POP
+	ret
+
 .Lctrhalfblock:
 	ld1		{v3.8b}, [x1]
 	eor		v3.8b, v0.8b, v3.8b
 	st1		{v3.8b}, [x0]
-.Lctrout:
 	FRAME_POP
 	ret
+
+.Lctrcarry:
+	umov		x7, v4.d[0]		/* load upper word of ctr  */
+	rev		x7, x7			/* ... to handle the carry */
+	add		x7, x7, #1
+	rev		x7, x7
+	ins		v4.d[0], x7
+	b		.Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
 	.ltorg
 	.ltorg
 
 

+ 7 - 1
arch/arm64/kernel/topology.c

@@ -11,6 +11,7 @@
  * for more details.
  */

+#include <linux/acpi.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {

 static int __init register_cpufreq_notifier(void)
 {
-	if (cap_parsing_failed)
+	/*
+	 * on ACPI-based systems we need to use the default cpu capacity
+	 * until we have the necessary code to parse the cpu capacity, so
+	 * skip registering cpufreq notifier.
+	 */
+	if (!acpi_disabled || cap_parsing_failed)
 		return -EINVAL;

 	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {

+ 7 - 1
arch/parisc/include/asm/bitops.h

@@ -6,7 +6,7 @@
 #endif

 #include <linux/compiler.h>
-#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
@@ -17,6 +17,12 @@
  * to include/asm-i386/bitops.h or kerneldoc
  */

+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
 
 
 
 

+ 0 - 2
arch/parisc/include/uapi/asm/bitsperlong.h

@@ -3,10 +3,8 @@
 
 
 #if defined(__LP64__)
 #define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
 #else
 #define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
 #endif

 #include <asm-generic/bitsperlong.h>

+ 3 - 2
arch/parisc/include/uapi/asm/swab.h

@@ -1,6 +1,7 @@
 #ifndef _PARISC_SWAB_H
 #ifndef _PARISC_SWAB_H
 #define _PARISC_SWAB_H
 #define _PARISC_SWAB_H
 
 
+#include <asm/bitsperlong.h>
 #include <linux/types.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/compiler.h>
 
 
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 }
 #define __arch_swab32 __arch_swab32
 
-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
 /*
 ** From "PA-RISC 2.0 Architecture", HP Professional Books.
 ** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 	return x;
 }
 #define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */
 
 #endif /* _PARISC_SWAB_H */

+ 1 - 1
arch/powerpc/Kconfig

@@ -164,7 +164,6 @@ config PPC
 	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
-	select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
 	bool "Build a relocatable kernel"
 	bool "Build a relocatable kernel"
 	depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 	depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 	select NONSTATIC_KERNEL
 	select NONSTATIC_KERNEL
+	select MODULE_REL_CRCS if MODVERSIONS
 	help
 	  This builds a kernel image that is capable of running at the
 	  location the kernel is loaded at. For ppc32, there is no any

+ 2 - 0
arch/powerpc/include/asm/cpu_has_feature.h

@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {

+ 2 - 0
arch/powerpc/include/asm/mmu.h

@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {

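The two #ifndef __clang__ guards above exist because BUILD_BUG_ON(!__builtin_constant_p(feature)) only holds when the compiler can prove the inlined argument is a compile-time constant, and clang does not always do so at that point. A small standalone illustration of the builtin's behaviour (userspace, not kernel code):

    #include <stdio.h>

    /* Reports whether the compiler sees its argument as a compile-time constant. */
    static inline int seen_as_constant(unsigned long feature)
    {
    	return __builtin_constant_p(feature);
    }

    int main(void)
    {
    	volatile unsigned long runtime_val = 42;

    	/* A literal is typically folded to a constant after inlining... */
    	printf("literal: %d\n", seen_as_constant(42));
    	/* ...a runtime value is not, so a build-time assert on it would trip. */
    	printf("runtime: %d\n", seen_as_constant(runtime_val));
    	return 0;
    }
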
+ 0 - 4
arch/powerpc/include/asm/module.h

@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif	/* _ASM_POWERPC_MODULE_H */

+ 2 - 1
arch/powerpc/include/asm/reg.h

@@ -649,9 +649,10 @@
 #define   SRR1_ISI_N_OR_G	0x10000000 /* ISI: Access is no-exec or G */
 #define   SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
 #define   SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
-#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 */
+#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 and 9 */
 #define   SRR1_WAKESYSERR	0x00300000 /* System error */
 #define   SRR1_WAKEEE		0x00200000 /* External interrupt */
+#define   SRR1_WAKEHVI		0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
 #define   SRR1_WAKEMT		0x00280000 /* mtctrl */
 #define	  SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
 #define   SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */

+ 0 - 40
arch/powerpc/include/asm/stackprotector.h

@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function.  The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-	unsigned long canary;
-
-	/* Try to get a semi random initial value. */
-	get_random_bytes(&canary, sizeof(canary));
-	canary ^= mftb();
-	canary ^= LINUX_VERSION_CODE;
-
-	current->stack_canary = canary;
-	__stack_chk_guard = current->stack_canary;
-}
-
-#endif	/* _ASM_STACKPROTECTOR_H */

+ 1 - 0
arch/powerpc/include/asm/xics.h

@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
 
 #ifdef CONFIG_PPC_POWERNV
 extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
 #else
 static inline int icp_opal_init(void) { return -ENODEV; }
 #endif

+ 0 - 4
arch/powerpc/kernel/Makefile

@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)

+ 0 - 3
arch/powerpc/kernel/asm-offsets.c

@@ -91,9 +91,6 @@ int main(void)
 	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE

+ 1 - 1
arch/powerpc/kernel/eeh_driver.c

@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 	struct eeh_pe *pe = (struct eeh_pe *)data;
-	bool *clear_sw_state = flag;
+	bool clear_sw_state = *(bool *)flag;
 	int i, rc = 1;
 
 	for (i = 0; rc && i < 3; i++)

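The one-line eeh_driver.c fix dereferences the void *flag instead of keeping it as a pointer; a pointer used as a truth value is always true once a flag is passed, whatever that flag holds. Minimal userspace illustration of the difference (names invented):

    #include <stdbool.h>
    #include <stdio.h>

    static void callback(void *data, void *flag)
    {
    	bool *as_pointer = flag;		/* old style: non-NULL, hence "true" */
    	bool  as_value   = *(bool *)flag;	/* fixed style: the value passed in */

    	(void)data;
    	printf("pointer test: %d, value test: %d\n", as_pointer != NULL, as_value);
    }

    int main(void)
    {
    	bool clear_sw_state = false;

    	/* Even with a false flag, the pointer itself still tests as true. */
    	callback(NULL, &clear_sw_state);
    	return 0;
    }
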
+ 1 - 5
arch/powerpc/kernel/entry_32.S

@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	lwz	r0,TSK_STACK_CANARY(r2)
-	lis	r4,__stack_chk_guard@ha
-	stw	r0,__stack_chk_guard@l(r4)
-#endif
+
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
 	/* r3-r12 are destroyed -- Cort */

+ 0 - 8
arch/powerpc/kernel/module_64.c

@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
 	for (end = (void *)vers + size; vers < end; vers++)
 		if (vers->name[0] == '.') {
 			memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-			/* The TOC symbol has no CRC computed. To avoid CRC
-			 * check failing, we must force it to the expected
-			 * value (see CRC check in module.c).
-			 */
-			if (!strcmp(vers->name, "TOC."))
-				vers->crc = -(unsigned long)reloc_start;
-#endif
 		}
 }
 

+ 0 - 6
arch/powerpc/kernel/process.c

@@ -64,12 +64,6 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)

+ 3 - 0
arch/powerpc/kernel/prom_init.c

@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+	if (!PHANDLE_VALID(cpu_pkg))
+		return;
+
 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 	prom.cpu = be32_to_cpu(rval);
 

+ 5 - 16
arch/powerpc/mm/fault.c

@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(debugger_fault_handler(regs)))
 		goto bail;
 
-	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (!user_mode(regs) && (address >= TASK_SIZE)) {
+	/*
+	 * The kernel should never take an execute fault nor should it
+	 * take a page fault to a kernel address.
+	 */
+	if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
 		rc = SIGSEGV;
 		goto bail;
 	}
@@ -390,20 +393,6 @@ good_area:
 #endif /* CONFIG_8xx */
 
 	if (is_exec) {
-		/*
-		 * An execution fault + no execute ?
-		 *
-		 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
-		 * deliberately create NX mappings, and use the fault to do the
-		 * cache flush. This is usually handled in hash_page_do_lazy_icache()
-		 * but we could end up here if that races with a concurrent PTE
-		 * update. In that case we need to fall through here to the VMA
-		 * check below.
-		 */
-		if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-			(regs->msr & SRR1_ISI_N_OR_G))
-			goto bad_area;
-
 		/*
 		 * Allow execution from readable areas if the MMU does not
 		 * provide separate controls over reading and executing.

+ 2 - 2
arch/powerpc/mm/pgtable-radix.c

@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		if (!pmdp)
 			return -ENOMEM;
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		}
 		pmdp = pmd_offset(pudp, ea);
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		if (!pmd_present(*pmdp)) {

+ 1 - 5
arch/powerpc/mm/tlb-radix.c

@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 		__tlbiel_pid(pid, set, ric);
 	}
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-	return;
+	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
-	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,

+ 10 - 2
arch/powerpc/platforms/powernv/smp.c

@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
 		wmask = SRR1_WAKEMASK_P8;
 
 	idle_states = pnv_get_supported_cpuidle_states();
+
 	/* We don't want to take decrementer interrupts while we are offline,
-	 * so clear LPCR:PECE1. We keep PECE2 enabled.
+	 * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+	 * enabled as to let IPIs in.
 	 */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
 		 * contains 0.
 		 */
 		if (((srr1 & wmask) == SRR1_WAKEEE) ||
+		    ((srr1 & wmask) == SRR1_WAKEHVI) ||
 		    (local_paca->irq_happened & PACA_IRQ_EE)) {
-			icp_native_flush_interrupt();
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				icp_opal_flush_interrupt();
+			else
+				icp_native_flush_interrupt();
 		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
 			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
 		if (srr1 && !generic_check_cpu_restart(cpu))
 			DBG("CPU%d Unexpected exit while offline !\n", cpu);
 	}
+
+	/* Re-enable decrementer interrupts */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
 	DBG("CPU%d coming online...\n", cpu);
 }

+ 33 - 2
arch/powerpc/sysdev/xics/icp-opal.c

@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
 	int hw_cpu = get_hard_smp_processor_id(cpu);
 
+	kvmppc_set_host_ipi(cpu, 1);
 	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-	int hw_cpu = hard_smp_processor_id();
+	int cpu = smp_processor_id();
 
-	opal_int_set_mfrr(hw_cpu, 0xff);
+	kvmppc_set_host_ipi(cpu, 0);
+	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
 	return smp_ipi_demux();
 }
 
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+	unsigned int xirr;
+	unsigned int vec;
+
+	do {
+		xirr = icp_opal_get_xirr();
+		vec = xirr & 0x00ffffff;
+		if (vec == XICS_IRQ_SPURIOUS)
+			break;
+		if (vec == XICS_IPI) {
+			/* Clear pending IPI */
+			int cpu = smp_processor_id();
+			kvmppc_set_host_ipi(cpu, 0);
+			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+		} else {
+			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+			       "disabling\n", vec);
+			xics_mask_unknown_vec(vec);
+		}
+
+		/* EOI the interrupt */
+	} while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */
 
 static const struct icp_ops icp_opal_ops = {

+ 8 - 0
arch/s390/kernel/ptrace.c

@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
 	if (target == current)
 		save_fpu_regs();
 
+	if (MACHINE_HAS_VX)
+		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+	else
+		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
 	/* If setting FPC, must validate it first. */
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
 		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
 	if (target == current)
 		save_fpu_regs();
 
+	for (i = 0; i < __NUM_VXRS_LOW; i++)
+		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 	if (rc == 0)
 		for (i = 0; i < __NUM_VXRS_LOW; i++)

+ 4 - 3
arch/s390/mm/pgtable.c

@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
 	return pgste;
 }
 
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 				    unsigned long addr, pte_t *ptep,
 				    pgste_t pgste, pte_t old, pte_t new)
 {
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
 	} else {
 		*ptep = new;
 	}
+	return old;
 }
 
 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
 	old = ptep_flush_direct(mm, addr, ptep);
-	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 	preempt_enable();
 	return old;
 }
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
 	old = ptep_flush_lazy(mm, addr, ptep);
-	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 	preempt_enable();
 	return old;
 }

+ 4 - 4
arch/sparc/include/asm/mmu_context_64.h

@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd),
-			     &mm->context.tsb_block[0],
+			     &mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-			     (mm->context.tsb_block[1].tsb ?
-			      &mm->context.tsb_block[1] :
+			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+			      &mm->context.tsb_block[MM_TSB_HUGE] :
 			      NULL)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }
 
 void tsb_grow(struct mm_struct *mm,

+ 1 - 1
arch/sparc/kernel/irq_64.c

@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 	unsigned long order = get_order(size);
 	unsigned long p;
 
-	p = __get_free_pages(GFP_KERNEL, order);
+	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate queue.\n");
 		prom_halt();

+ 3 - 3
arch/sparc/kernel/sstate.c

@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
 	"Linux powering off";
 	"Linux powering off";
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 	"Linux rebooting";
 	"Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
-	"Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+	"Linux panicking";
 
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 {
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
 
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
-	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
 
 	return NOTIFY_DONE;
 }

+ 73 - 0
arch/sparc/kernel/traps_64.c

@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
 	atomic_inc(&sun4v_resum_oflow_cnt);
 }
 
+/* Given a set of registers, get the virtual address that was being accessed
+ * by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+	unsigned int insn;
+
+	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+		return compute_effective_address(regs, insn,
+						 (insn >> 25) & 0x1f);
+	}
+	return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+				  struct sun4v_error_entry *ent) {
+
+	unsigned int attrs = ent->err_attrs;
+
+	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+		unsigned long addr = ent->err_raddr;
+		siginfo_t info;
+
+		if (addr == ~(u64)0) {
+			/* This seems highly unlikely to ever occur */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+		} else {
+			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+							      PAGE_SIZE);
+
+			/* Break the unfortunate news. */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+				 addr);
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR:   Claiming %lu ages.\n",
+				 page_cnt);
+
+			while (page_cnt-- > 0) {
+				if (pfn_valid(addr >> PAGE_SHIFT))
+					get_page(pfn_to_page(addr >> PAGE_SHIFT));
+				addr += PAGE_SIZE;
+			}
+		}
+		info.si_signo = SIGKILL;
+		info.si_errno = 0;
+		info.si_trapno = 0;
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+	if (attrs & SUN4V_ERR_ATTRS_PIO) {
+		siginfo_t info;
+
+		info.si_signo = SIGBUS;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+
+	/* Default to doing nothing */
+	return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 
 	put_cpu();
 
+	if (!(regs->tstate & TSTATE_PRIV) &&
+	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+		/* DON'T PANIC: This userspace error was handled. */
+		return;
+	}
+
 #ifdef CONFIG_PCI
 	/* Check for the special PCI poke sequence. */
 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {

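The new handler turns the reported error byte range into a page count with DIV_ROUND_UP(ent->err_size, PAGE_SIZE), so any partial page still claims a full page. The rounding idiom on its own (illustrative values only):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned long page_size = 4096;

    	printf("%lu\n", DIV_ROUND_UP(1UL, page_size));		/* 1 */
    	printf("%lu\n", DIV_ROUND_UP(4096UL, page_size));	/* 1 */
    	printf("%lu\n", DIV_ROUND_UP(4097UL, page_size));	/* 2 */
    	return 0;
    }
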
+ 4 - 4
arch/x86/crypto/aesni-intel_glue.c

@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
 		    aesni_simd_skciphers[i]; i++)
 		simd_skcipher_free(aesni_simd_skciphers[i]);
 
-	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
-		    aesni_simd_skciphers2[i].simd; i++)
-		simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
+		if (aesni_simd_skciphers2[i].simd)
+			simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }
 
 static int __init aesni_init(void)
@@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
 		simd = simd_skcipher_create_compat(algname, drvname, basename);
 		err = PTR_ERR(simd);
 		if (IS_ERR(simd))
-			goto unregister_simds;
+			continue;
 
 		aesni_simd_skciphers2[i].simd = simd;
 	}

+ 26 - 34
arch/x86/events/intel/rapl.c

@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-	return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
 	/* must be done before validate_group */
 	pmu = cpu_to_rapl_pmu(event->cpu);
+	if (!pmu)
+		return -EINVAL;
 	event->cpu = pmu->cpu;
 	event->pmu_private = pmu;
 	event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
+	if (!pmu) {
+		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!pmu)
+			return -ENOMEM;
+
+		raw_spin_lock_init(&pmu->lock);
+		INIT_LIST_HEAD(&pmu->active_list);
+		pmu->pmu = &rapl_pmus->pmu;
+		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(pmu);
+
+		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+	}
+
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
 	return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-	if (pmu)
-		return 0;
-
-	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-	if (!pmu)
-		return -ENOMEM;
-
-	raw_spin_lock_init(&pmu->lock);
-	INIT_LIST_HEAD(&pmu->active_list);
-	pmu->pmu = &rapl_pmus->pmu;
-	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-	pmu->cpu = -1;
-	rapl_hrtimer_init(pmu);
-	rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-	return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
 	/*
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
-
-	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-				rapl_cpu_prepare, NULL);
-	if (ret)
-		goto out;
-
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
 				"perf/x86/rapl:online",
 				rapl_cpu_online, rapl_cpu_offline);
 	if (ret)
-		goto out1;
+		goto out;
 
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out2;
+		goto out1;
 
 	rapl_advertise();
 	return 0;
 
-out2:
-	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
 }

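The bound check added to cpu_to_rapl_pmu() relies on the package id being unsigned: a '-1' returned for an unmapped CPU wraps to a huge value and fails the pkgid < maxpkg test. Standalone sketch of that trick (values invented):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int maxpkg = 8;
    	int topology_id = -1;			/* "no mapping" sentinel */
    	unsigned int pkgid = topology_id;	/* wraps to UINT_MAX */

    	if (pkgid < maxpkg)
    		printf("valid package %u\n", pkgid);
    	else
    		printf("rejected: sentinel wraps to %u\n", pkgid);
    	return 0;
    }
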
+ 91 - 141
arch/x86/events/intel/uncore.c

@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	return pmu->boxes[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 	pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-	struct intel_uncore_pmu *pmu = type->pmus;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	if (pmu) {
-		pkg = topology_physical_package_id(cpu);
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box)
-				uncore_box_exit(box);
-		}
-	}
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-	struct intel_uncore_type **types;
-
-	for (types = uncore_msr_uncores; *types; types++)
-		__uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
 	int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
 	}
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
-	return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg, ncpus = 1;
-
-	if (first_init) {
-		/*
-		 * On init we get the number of online cpus in the package
-		 * and set refcount for all of them.
-		 */
-		ncpus = cpumask_weight(topology_core_cpumask(cpu));
-	}
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (!box)
-				continue;
-			/* The first cpu on a package activates the box */
-			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-				uncore_box_init(box);
-		}
-	}
-
-	return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
-				continue;
-			/* First cpu of a package allocates the box */
-			box = uncore_alloc_box(type, cpu_to_node(cpu));
-			if (!box)
-				return -ENOMEM;
-			box->pmu = pmu;
-			box->pkgid = pkg;
-			pmu->boxes[pkg] = box;
-		}
-	}
-	return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 				   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, pkg, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return 0;
-
+		goto unref;
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+	/* Clear the references */
+	pkg = topology_logical_package_id(cpu);
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
 	return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+			 unsigned int pkg, unsigned int cpu)
+{
+	struct intel_uncore_box *box, *tmp;
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu;
+	LIST_HEAD(allocated);
+	int i;
+
+	/* Try to allocate all required boxes */
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			if (pmu->boxes[pkg])
+				continue;
+			box = uncore_alloc_box(type, cpu_to_node(cpu));
+			if (!box)
+				goto cleanup;
+			box->pmu = pmu;
+			box->pkgid = pkg;
+			list_add(&box->active_list, &allocated);
+		}
+	}
+	/* Install them in the pmus */
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		box->pmu->boxes[pkg] = box;
+	}
+	return 0;
+
+cleanup:
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		kfree(box);
+	}
+	return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, ret, pkg, target;
+
+	pkg = topology_logical_package_id(cpu);
+	ret = allocate_boxes(types, pkg, cpu);
+	if (ret)
+		return ret;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (!box && atomic_inc_return(&box->refcnt) == 1)
+				uncore_box_init(box);
+		}
+	}
 
 	/*
 	 * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;
 
-	/*
-	 * Install callbacks. Core will call them for each online cpu.
-	 *
-	 * The first online cpu of each package allocates and takes
-	 * the refcounts for all other online cpus in that package.
-	 * If msrs are not enabled no allocation is required and
-	 * uncore_cpu_prepare() is not called for each online cpu.
-	 */
-	if (!cret) {
-	       ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-				       "perf/x86/intel/uncore:prepare",
-				       uncore_cpu_prepare, NULL);
-		if (ret)
-			goto err;
-	} else {
-		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-					  "perf/x86/intel/uncore:prepare",
-					  uncore_cpu_prepare, NULL);
-	}
-	first_init = 1;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-			  "perf/x86/uncore:starting",
-			  uncore_cpu_starting, uncore_cpu_dying);
-	first_init = 0;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-			  "perf/x86/uncore:online",
-			  uncore_event_cpu_online, uncore_event_cpu_offline);
+	/* Install hotplug callbacks to setup the targets for each package */
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				"perf/x86/intel/uncore:online",
+				uncore_event_cpu_online,
+				uncore_event_cpu_offline);
+	if (ret)
+		goto err;
 	return 0;
 
 err:
-	/* Undo box->init_box() */
-	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 	return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 }

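allocate_boxes() above gathers every new box on a temporary list and only installs them once all allocations have succeeded, freeing the partial set otherwise. The same all-or-nothing pattern reduced to plain C (no kernel APIs, sizes invented):

    #include <stdio.h>
    #include <stdlib.h>

    /* Install 'n' freshly allocated buffers into 'slots', or none at all. */
    static int allocate_all(void **slots, int n, size_t size)
    {
    	void **tmp = calloc(n, sizeof(*tmp));
    	int i;

    	if (!tmp)
    		return -1;
    	for (i = 0; i < n; i++) {
    		tmp[i] = malloc(size);
    		if (!tmp[i])
    			goto cleanup;
    	}
    	for (i = 0; i < n; i++)		/* publish only on full success */
    		slots[i] = tmp[i];
    	free(tmp);
    	return 0;

    cleanup:
    	while (i-- > 0)
    		free(tmp[i]);
    	free(tmp);
    	return -1;
    }

    int main(void)
    {
    	void *slots[4] = { 0 };

    	printf("%s\n", allocate_all(slots, 4, 64) ? "failed" : "installed");
    	return 0;
    }
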
+ 1 - 0
arch/x86/include/asm/microcode.h

@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void)			{ return 0; };
 static inline void __init load_ucode_bsp(void)			{ }

+ 1 - 0
arch/x86/include/asm/processor.h

@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+	__u8			cu_id;
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */

+ 2 - 2
arch/x86/kernel/apic/io_apic.c

@@ -1875,7 +1875,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_eoi		= ioapic_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
-	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1887,7 +1886,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_eoi		= ioapic_ir_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
-	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -2117,6 +2115,7 @@ static inline void __init check_timer(void)
 			if (idx != -1 && irq_trigger(idx))
 				unmask_ioapic_irq(irq_get_chip_data(0));
 		}
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		if (timer_irq_works()) {
 			if (disable_timer_pin_1 > 0)
@@ -2138,6 +2137,7 @@ static inline void __init check_timer(void)
 		 * legacy devices should be connected to IO APIC #0
 		 */
 		replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		legacy_pic->unmask(0);
 		if (timer_irq_works()) {

+ 15 - 1
arch/x86/kernel/cpu/amd.c

@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
 	/* get information required for multi-node processors */
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+		u32 eax, ebx, ecx, edx;
 
-		node_id = cpuid_ecx(0x8000001e) & 7;
+		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+		node_id  = ecx & 0xff;
+		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+		if (c->x86 == 0x15)
+			c->cu_id = ebx & 0xff;
+
+		if (c->x86 >= 0x17) {
+			c->cpu_core_id = ebx & 0xff;
+
+			if (smp_num_siblings > 1)
+				c->x86_max_cores /= smp_num_siblings;
+		}
 
 		/*
 		 * We may have multiple LLCs if L3 caches exist, so check if we

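amd_get_topology() now reads the whole of CPUID leaf 0x8000001e: the node id from ECX[7:0], the sibling count from EBX[15:8] + 1, and the compute-unit id (Fam15h) or core id (Fam17h) from EBX[7:0]. The field extraction in isolation, with made-up register values:

    #include <stdio.h>

    int main(void)
    {
    	/* Example raw values as CPUID 0x8000001e might return them. */
    	unsigned int ebx = 0x00000102;	/* [15:8] = 1 -> 2 siblings, [7:0] = 2 */
    	unsigned int ecx = 0x00000003;	/* [7:0] = 3 -> node id 3 */

    	unsigned int node_id  = ecx & 0xff;
    	unsigned int siblings = ((ebx >> 8) & 0xff) + 1;
    	unsigned int unit_id  = ebx & 0xff;	/* cu_id / cpu_core_id */

    	printf("node %u, %u siblings, unit %u\n", node_id, siblings, unit_id);
    	return 0;
    }
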
+ 1 - 0
arch/x86/kernel/cpu/common.c

@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_model_id[0] = '\0';  /* Unset */
 	c->x86_max_cores = 1;
 	c->x86_coreid_bits = 0;
+	c->cu_id = 0xff;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
 	c->x86_phys_bits = 36;

+ 12 - 19
arch/x86/kernel/cpu/mcheck/mce.c

@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 
 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
 
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
 	unsigned long when = jiffies + interval;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	if (timer_pending(t)) {
-		if (time_before(when, t->expires))
-			mod_timer(t, when);
-	} else {
-		t->expires = round_jiffies(when);
-		add_timer_on(t, smp_processor_id());
-	}
+	if (!timer_pending(t) || time_before(when, t->expires))
+		mod_timer(t, round_jiffies(when));
 
 	local_irq_restore(flags);
 }
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
 
 done:
 	__this_cpu_write(mce_next_interval, iv);
-	__restart_timer(t, iv);
+	__start_timer(t, iv);
 }
 
 /*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
 	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
-	__restart_timer(t, interval);
+	__start_timer(t, interval);
 
 	if (interval < iv)
 		__this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
 	}
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
 	unsigned long iv = check_interval * HZ;
 
 	if (mca_cfg.ignore_ce || !iv)
 		return;
 
-	per_cpu(mce_next_interval, cpu) = iv;
-
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, cpu);
+	this_cpu_write(mce_next_interval, iv);
+	__start_timer(t, iv);
 }
 
 static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
 	unsigned int cpu = smp_processor_id();
 
 	setup_pinned_timer(t, mce_timer_fn, cpu);
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
 
 static int mce_cpu_online(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	int ret;
 
 	mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
 		return ret;
 	}
 	mce_reenable_cpu();
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 	return 0;
 }
 
 static int mce_cpu_pre_down(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 
 	mce_disable_cpu();
 	del_timer_sync(t);

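__start_timer() collapses the old two-branch logic into a single "arm if not pending, or if the new deadline is earlier" test built on time_before(). The wrap-safe comparison that makes this work looks like this in isolation (simplified; the kernel macro also carries type checks):

    #include <stdio.h>

    /* Wrap-safe "a is before b" for free-running counters such as jiffies. */
    #define time_before(a, b)	((long)((a) - (b)) < 0)

    int main(void)
    {
    	unsigned long near_wrap  = (unsigned long)-10;	/* jiffies just before wrap */
    	unsigned long after_wrap = 10;			/* jiffies just after wrap */

    	/* A plain '<' gets the wrapped case wrong; the signed difference does not. */
    	printf("plain:     %d\n", near_wrap < after_wrap);		/* 0 */
    	printf("wrap-safe: %d\n", time_before(near_wrap, after_wrap));	/* 1 */
    	return 0;
    }
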
+ 3 - 2
arch/x86/kernel/cpu/microcode/amd.c

@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
 reget:
 		if (!get_builtin_microcode(&cp, family)) {
 #ifdef CONFIG_BLK_DEV_INITRD
-			cp = find_cpio_data(ucode_path, (void *)initrd_start,
-					    initrd_end - initrd_start, NULL);
+			if (!initrd_gone)
+				cp = find_cpio_data(ucode_path, (void *)initrd_start,
+						    initrd_end - initrd_start, NULL);
 #endif
 			if (!(cp.data && cp.size)) {
 				/*

+ 17 - 5
arch/x86/kernel/cpu/microcode/core.c

@@ -46,6 +46,8 @@
 static struct microcode_ops	*microcode_ops;
 static bool dis_ucode_ldr = true;
 
+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);
 
 /*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
+	int ret = -EINVAL;
 
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
 		if (c->x86 >= 6)
-			return save_microcode_in_initrd_intel();
+			ret = save_microcode_in_initrd_intel();
 		break;
 	case X86_VENDOR_AMD:
 		if (c->x86 >= 0x10)
-			return save_microcode_in_initrd_amd(c->x86);
+			ret = save_microcode_in_initrd_amd(c->x86);
 		break;
 	default:
 		break;
 	}
 
-	return -EINVAL;
+	initrd_gone = true;
+
+	return ret;
 }
 
 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 	 * has the virtual address of the beginning of the initrd. It also
 	 * possibly relocates the ramdisk. In either case, initrd_start contains
 	 * the updated address so use that instead.
+	 *
+	 * initrd_gone is for the hotplug case where we've thrown out initrd
+	 * already.
 	 */
 	 */
-		start = initrd_start;
+	if (!use_pa) {
+		if (initrd_gone)
+			return (struct cpio_data){ NULL, 0, "" };
+		if (initrd_start)
+			start = initrd_start;
+	}
 
 	return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */

+ 1 - 8
arch/x86/kernel/cpu/microcode/intel.c

@@ -41,7 +41,7 @@
 
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
 	struct ucode_cpu_info uci;
 	struct cpio_data cp;
 
-	/*
-	 * AP loading didn't find any microcode patch, no need to save anything.
-	 */
-	if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-		return 0;
-
 	if (!load_builtin_intel_microcode(&cp))
 		cp = find_microcode_in_initrd(ucode_path, false);
 
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
 	return 0;
 }
 
-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */

+ 3 - 1
arch/x86/kernel/fpu/core.c

@@ -9,6 +9,7 @@
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
 	 * it will #GP. Make sure it is replaced after the memset().
 	 */
 	if (static_cpu_has(X86_FEATURE_XSAVES))
-		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+					       xfeatures_mask;
 
 	if (static_cpu_has(X86_FEATURE_FXSR))
 		fpstate_init_fxstate(&state->fxsave);

+ 1 - 0
arch/x86/kernel/hpet.c

@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 	} else {
 		struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 
+		irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
 		disable_irq(hdev->irq);
 		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));

+ 9 - 3
arch/x86/kernel/smpboot.c

@@ -433,9 +433,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
 		if (c->phys_proc_id == o->phys_proc_id &&
-		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-		    c->cpu_core_id == o->cpu_core_id)
-			return topology_sane(c, o, "smt");
+		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
+			if (c->cpu_core_id == o->cpu_core_id)
+				return topology_sane(c, o, "smt");
+
+			if ((c->cu_id != 0xff) &&
+			    (o->cu_id != 0xff) &&
+			    (c->cu_id == o->cu_id))
+				return topology_sane(c, o, "smt");
+		}
 
 	} else if (c->phys_proc_id == o->phys_proc_id &&
 		   c->cpu_core_id == o->cpu_core_id) {

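The reworked match_smt() treats two CPUs as SMT siblings when they share package and last-level cache and either the core id matches or, on parts that populate it (cu_id != 0xff), the compute-unit id matches. The same predicate written out as plain C (struct fields invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct cpu {
    	int phys_proc_id;
    	int llc_id;
    	int cpu_core_id;
    	unsigned char cu_id;	/* 0xff means "not populated" */
    };

    static bool match_smt(const struct cpu *c, const struct cpu *o)
    {
    	if (c->phys_proc_id != o->phys_proc_id || c->llc_id != o->llc_id)
    		return false;
    	if (c->cpu_core_id == o->cpu_core_id)
    		return true;
    	/* Same compute unit also counts, but only if both ids are valid. */
    	return c->cu_id != 0xff && o->cu_id != 0xff && c->cu_id == o->cu_id;
    }

    int main(void)
    {
    	struct cpu a = { 0, 0, 1, 0x02 };
    	struct cpu b = { 0, 0, 3, 0x02 };	/* different core, same unit */

    	printf("%s\n", match_smt(&a, &b) ? "siblings" : "not siblings");
    	return 0;
    }
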
+ 3 - 2
arch/x86/kernel/tsc.c

@@ -1356,6 +1356,9 @@ void __init tsc_init(void)
 		(unsigned long)cpu_khz / 1000,
 		(unsigned long)cpu_khz % 1000);
 
+	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
+	tsc_store_and_check_tsc_adjust(true);
+
 	/*
 	 * Secondary CPUs do not run through tsc_init(), so set up
 	 * all the scale factors for all CPUs, assuming the same
@@ -1386,8 +1389,6 @@ void __init tsc_init(void)
 
 	if (unsynchronized_tsc())
 		mark_tsc_unstable("TSCs unsynchronized");
-	else
-		tsc_store_and_check_tsc_adjust(true);
 
 	check_system_tsc_reliable();
 

+ 7 - 9
arch/x86/kernel/tsc_sync.c

@@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu)
 	if (unsynchronized_tsc())
 		return;
 
-	if (tsc_clocksource_reliable) {
-		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
-			pr_info(
-			"Skipped synchronization checks as TSC is reliable.\n");
-		return;
-	}
-
 	/*
 	 * Set the maximum number of test runs to
 	 *  1 if the CPU does not provide the TSC_ADJUST MSR
@@ -380,14 +373,19 @@ void check_tsc_sync_target(void)
 	int cpus = 2;
 
 	/* Also aborts if there is no TSC. */
-	if (unsynchronized_tsc() || tsc_clocksource_reliable)
+	if (unsynchronized_tsc())
 		return;
 
 	/*
 	 * Store, verify and sanitize the TSC adjust register. If
 	 * successful skip the test.
+	 *
+	 * The test is also skipped when the TSC is marked reliable. This
+	 * is true for SoCs which have no fallback clocksource. On these
+	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
+	 * register might have been wreckaged by the BIOS..
 	 */
-	if (tsc_store_and_check_tsc_adjust(false)) {
+	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
 		atomic_inc(&skip_test);
 		return;
 	}
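Taken together, the two tsc_sync.c hunks change where a "reliable" TSC short-circuits: previously such systems skipped the whole target-side check, including the TSC_ADJUST sanitizing; now the register is always stored and checked, and only the expensive synchronization loop is skipped. A stand-alone sketch of the new skip decision (the helper names are placeholders, not the kernel functions):

#include <stdbool.h>
#include <stdio.h>

/* placeholder for tsc_store_and_check_tsc_adjust(false): always runs,
 * returns true when the register was already consistent */
static bool tsc_adjust_was_consistent(void)
{
	return true;
}

static bool skip_sync_test(bool tsc_clocksource_reliable)
{
	/* the left operand is evaluated first, so the MSR is always inspected */
	return tsc_adjust_was_consistent() || tsc_clocksource_reliable;
}

int main(void)
{
	printf("skip sync test: %d\n", skip_sync_test(true));
	return 0;
}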

+ 1 - 0
arch/x86/kvm/x86.c

@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 
 	/* Set XSTATE_BV */
+	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
 	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 
 	/*
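The added mask clamps the XSTATE_BV that fill_xsave() reports to user space: only components the guest can actually enable through XCR0, plus the always-architectural FP and SSE bits, survive, so a later restore of the saved image cannot name state the guest does not support. A small illustration of the masking (the XFEATURE_MASK_FP/SSE bit positions match the kernel definitions; the other values are made up):

#include <stdint.h>
#include <stdio.h>

#define XFEATURE_MASK_FP	(1ULL << 0)
#define XFEATURE_MASK_SSE	(1ULL << 1)
#define XFEATURE_MASK_FPSSE	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE)

int main(void)
{
	uint64_t host_xstate_bv = 0xe7;		/* hypothetical: host tracks more features */
	uint64_t guest_supported_xcr0 = 0x07;	/* hypothetical: guest limited to x87/SSE/AVX */

	uint64_t xstate_bv = host_xstate_bv &
			     (guest_supported_xcr0 | XFEATURE_MASK_FPSSE);

	printf("reported xstate_bv = %#llx\n", (unsigned long long)xstate_bv);
	return 0;
}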

+ 2 - 0
arch/x86/mm/dump_pagetables.c

@@ -15,6 +15,7 @@
 #include <linux/debugfs.h>
 #include <linux/mm.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 
 #include <asm/pgtable.h>
@@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 		} else
 			note_page(m, &st, __pgprot(0), 1);
 
+		cond_resched();
 		start++;
 	}
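The new <linux/sched.h> include and the cond_resched() call let the page-table dumper yield the CPU inside what can be a very long walk, instead of hogging it until the soft-lockup detector complains. A user-space analogue of the pattern, with sched_yield() standing in for cond_resched() (an analogy only; the semantics are not identical):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	long sum = 0;

	for (long i = 0; i < 10000000; i++) {
		sum += i;
		if ((i & 0xffff) == 0)	/* periodically let other tasks run */
			sched_yield();
	}
	printf("sum = %ld\n", sum);
	return 0;
}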
 
 

+ 16 - 0
arch/x86/platform/efi/efi_64.c

@@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
 	efi_scratch.use_pgd = true;
 
+	/*
+	 * Certain firmware versions are way too sentimential and still believe
+	 * they are exclusive and unquestionable owners of the first physical page,
+	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+	 * (but then write-access it later during SetVirtualAddressMap()).
+	 *
+	 * Create a 1:1 mapping for this page, to avoid triple faults during early
+	 * boot with such firmware. We are free to hand this page to the BIOS,
+	 * as trim_bios_range() will reserve the first page and isolate it away
+	 * from memory allocators anyway.
+	 */
+	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+		pr_err("Failed to create 1:1 mapping for the first page!\n");
+		return 1;
+	}
+
 	/*
 	 * When making calls to the firmware everything needs to be 1:1
 	 * mapped and addressable with 32-bit pointers. Map the kernel

+ 1 - 1
arch/xtensa/kernel/setup.c

@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
 
 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
 	local_irq_disable();
 	/*
 	 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
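The tightened guard means the full-MMU reset sequence is only compiled when the kernel itself is an MMU build, not merely when the core advertises the hardware. A simplified preprocessor model of combining the two conditions (the kernel's IS_ENABLED() is more involved and works on CONFIG_* symbols; the macro names below are made up for illustration):

#include <stdio.h>

#define XCHAL_HAVE_PTP_MMU	1	/* hypothetical: the core has a full MMU */
#define KERNEL_BUILT_WITH_MMU	1	/* hypothetical stand-in for IS_ENABLED(CONFIG_MMU) */

int main(void)
{
#if XCHAL_HAVE_PTP_MMU && KERNEL_BUILT_WITH_MMU
	puts("full-MMU reset path compiled in");
#else
	puts("no-MMU reset path compiled in");
#endif
	return 0;
}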

Some files were not shown because too many files changed in this diff