
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

The conflict was an interaction between a bug fix in the
netvsc driver in 'net' and an optimization of the RX path
in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 8 years ago
parent
commit
3efa70d78f
100 changed files with 556 additions and 626 deletions
  1. + 0 - 5    Documentation/media/uapi/cec/cec-func-close.rst
  2. + 0 - 5    Documentation/media/uapi/cec/cec-func-ioctl.rst
  3. + 0 - 5    Documentation/media/uapi/cec/cec-func-open.rst
  4. + 0 - 5    Documentation/media/uapi/cec/cec-func-poll.rst
  5. + 12 - 5   Documentation/media/uapi/cec/cec-intro.rst
  6. + 0 - 5    Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
  7. + 0 - 5    Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
  8. + 0 - 5    Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
  9. + 0 - 5    Documentation/media/uapi/cec/cec-ioc-dqevent.rst
  10. + 0 - 5   Documentation/media/uapi/cec/cec-ioc-g-mode.rst
  11. + 0 - 5   Documentation/media/uapi/cec/cec-ioc-receive.rst
  12. + 1 - 1   MAINTAINERS
  13. + 2 - 2   Makefile
  14. + 1 - 1   arch/arc/kernel/unaligned.c
  15. + 1 - 1   arch/powerpc/Kconfig
  16. + 2 - 0   arch/powerpc/include/asm/cpu_has_feature.h
  17. + 2 - 0   arch/powerpc/include/asm/mmu.h
  18. + 0 - 4   arch/powerpc/include/asm/module.h
  19. + 0 - 40  arch/powerpc/include/asm/stackprotector.h
  20. + 0 - 4   arch/powerpc/kernel/Makefile
  21. + 0 - 3   arch/powerpc/kernel/asm-offsets.c
  22. + 1 - 1   arch/powerpc/kernel/eeh_driver.c
  23. + 1 - 5   arch/powerpc/kernel/entry_32.S
  24. + 0 - 8   arch/powerpc/kernel/module_64.c
  25. + 0 - 6   arch/powerpc/kernel/process.c
  26. + 3 - 0   arch/powerpc/kernel/prom_init.c
  27. + 2 - 2   arch/powerpc/mm/pgtable-radix.c
  28. + 4 - 4   arch/x86/crypto/aesni-intel_glue.c
  29. + 26 - 34 arch/x86/events/intel/rapl.c
  30. + 91 - 141 arch/x86/events/intel/uncore.c
  31. + 1 - 0   arch/x86/include/asm/microcode.h
  32. + 2 - 0   arch/x86/kernel/apic/io_apic.c
  33. + 12 - 19 arch/x86/kernel/cpu/mcheck/mce.c
  34. + 3 - 2   arch/x86/kernel/cpu/microcode/amd.c
  35. + 17 - 5  arch/x86/kernel/cpu/microcode/core.c
  36. + 1 - 8   arch/x86/kernel/cpu/microcode/intel.c
  37. + 3 - 1   arch/x86/kernel/fpu/core.c
  38. + 1 - 0   arch/x86/kernel/hpet.c
  39. + 1 - 0   arch/x86/kvm/x86.c
  40. + 16 - 0  arch/x86/platform/efi/efi_64.c
  41. + 1 - 1   arch/xtensa/kernel/setup.c
  42. + 1 - 1   crypto/algif_aead.c
  43. + 5 - 1   drivers/acpi/nfit/core.c
  44. + 1 - 4   drivers/base/firmware_class.c
  45. + 6 - 6   drivers/base/memory.c
  46. + 6 - 5   drivers/base/power/runtime.c
  47. + 14 - 3  drivers/cpufreq/brcmstb-avs-cpufreq.c
  48. + 30 - 0  drivers/cpufreq/intel_pstate.c
  49. + 1 - 1   drivers/crypto/ccp/ccp-dev-v5.c
  50. + 1 - 0   drivers/crypto/ccp/ccp-dev.h
  51. + 5 - 1   drivers/crypto/ccp/ccp-dmaengine.c
  52. + 28 - 25 drivers/crypto/chelsio/chcr_algo.c
  53. + 8 - 10  drivers/crypto/chelsio/chcr_core.c
  54. + 3 - 0   drivers/crypto/chelsio/chcr_crypto.h
  55. + 1 - 1   drivers/crypto/qat/qat_c62x/adf_drv.c
  56. + 1 - 0   drivers/crypto/qat/qat_common/adf_accel_devices.h
  57. + 2 - 2   drivers/crypto/qat/qat_common/qat_hal.c
  58. + 3 - 11  drivers/firmware/efi/libstub/fdt.c
  59. + 3 - 1   drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
  60. + 8 - 5   drivers/gpu/drm/drm_atomic.c
  61. + 0 - 9   drivers/gpu/drm/drm_atomic_helper.c
  62. + 18 - 5  drivers/gpu/drm/drm_connector.c
  63. + 4 - 0   drivers/gpu/drm/drm_drv.c
  64. + 4 - 12  drivers/gpu/drm/i915/i915_drv.h
  65. + 20 - 0  drivers/gpu/drm/i915/intel_atomic_plane.c
  66. + 42 - 83 drivers/gpu/drm/i915/intel_display.c
  67. + 7 - 2   drivers/gpu/drm/i915/intel_drv.h
  68. + 20 - 32 drivers/gpu/drm/i915/intel_fbc.c
  69. + 2 - 2   drivers/gpu/drm/i915/intel_fbdev.c
  70. + 4 - 4   drivers/gpu/drm/i915/intel_sprite.c
  71. + 2 - 1   drivers/gpu/drm/nouveau/dispnv04/hw.c
  72. + 1 - 0   drivers/gpu/drm/nouveau/nouveau_fence.h
  73. + 1 - 1   drivers/gpu/drm/nouveau/nouveau_led.h
  74. + 2 - 1   drivers/gpu/drm/nouveau/nouveau_usif.c
  75. + 6 - 0   drivers/gpu/drm/nouveau/nv50_display.c
  76. + 6 - 0   drivers/gpu/drm/nouveau/nv84_fence.c
  77. + 1 - 1   drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
  78. + 0 - 2   drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
  79. + 2 - 1   drivers/gpu/drm/radeon/radeon_drv.c
  80. + 2 - 2   drivers/gpu/drm/radeon/radeon_gem.c
  81. + 1 - 0   drivers/hv/ring_buffer.c
  82. + 2 - 2   drivers/iio/adc/palmas_gpadc.c
  83. + 2 - 2   drivers/iio/health/afe4403.c
  84. + 2 - 2   drivers/iio/health/afe4404.c
  85. + 1 - 1   drivers/iio/health/max30100.c
  86. + 4 - 2   drivers/iio/humidity/dht11.c
  87. + 4 - 4   drivers/md/dm-crypt.c
  88. + 2 - 2   drivers/md/dm-mpath.c
  89. + 4 - 0   drivers/md/dm-rq.c
  90. + 1 - 1   drivers/media/cec/cec-adap.c
  91. + 2 - 1   drivers/mmc/host/sdhci.c
  92. + 2 - 6   drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
  93. + 2 - 2   drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
  94. + 25 - 10 drivers/net/ethernet/mellanox/mlx4/en_netdev.c
  95. + 4 - 1   drivers/net/ethernet/mellanox/mlx4/en_rx.c
  96. + 2 - 1   drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
  97. + 3 - 0   drivers/net/hyperv/netvsc.c
  98. + 2 - 2   drivers/net/macvtap.c
  99. + 7 - 0   drivers/net/phy/phy_device.c
  100. + 6 - 4  drivers/net/tun.c

+ 0 - 5
Documentation/media/uapi/cec/cec-func-close.rst

@@ -33,11 +33,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 Closes the cec device. Resources associated with the file descriptor are
 freed. The device configuration remain unchanged.


+ 0 - 5
Documentation/media/uapi/cec/cec-func-ioctl.rst

@@ -39,11 +39,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 The :c:func:`ioctl()` function manipulates cec device parameters. The
 argument ``fd`` must be an open file descriptor.


+ 0 - 5
Documentation/media/uapi/cec/cec-func-open.rst

@@ -46,11 +46,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To open a cec device applications call :c:func:`open()` with the
 desired device name. The function has no side effects; the device
 configuration remain unchanged.

+ 0 - 5
Documentation/media/uapi/cec/cec-func-poll.rst

@@ -39,11 +39,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 With the :c:func:`poll()` function applications can wait for CEC
 events.


+ 12 - 5
Documentation/media/uapi/cec/cec-intro.rst

@@ -3,11 +3,6 @@
 Introduction
 ============

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 HDMI connectors provide a single pin for use by the Consumer Electronics
 Control protocol. This protocol allows different devices connected by an
 HDMI cable to communicate. The protocol for CEC version 1.4 is defined
@@ -31,3 +26,15 @@ control just the CEC pin.
 Drivers that support CEC will create a CEC device node (/dev/cecX) to
 give userspace access to the CEC adapter. The
 :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
+
+In order to check the support and test it, it is suggested to download
+the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
+provides three tools to handle CEC:
+
+- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
+  and monitor CEC messages.
+
+- cec-compliance: does a CEC compliance test of a remote CEC device to
+  determine how compliant the CEC implementation is.
+
+- cec-follower: emulates a CEC follower.

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst

@@ -29,11 +29,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
 device information, applications call the ioctl with a pointer to a
 struct :c:type:`cec_caps`. The driver fills the structure and

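The context above documents the usual calling pattern for CEC_ADAP_G_CAPS. As a minimal userspace sketch (not part of this commit; the /dev/cec0 device path is an assumption), querying the adapter could look like this:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

int main(void)
{
    struct cec_caps caps;
    int fd = open("/dev/cec0", O_RDWR);  /* device node path is an assumption */

    if (fd < 0)
        return 1;

    memset(&caps, 0, sizeof(caps));
    /* The driver fills the structure, as described above */
    if (ioctl(fd, CEC_ADAP_G_CAPS, &caps) == 0)
        printf("driver: %s, capabilities: 0x%08x\n",
               caps.driver, caps.capabilities);

    close(fd);
    return 0;
}
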
+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst

@@ -35,11 +35,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
 struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst

@@ -35,11 +35,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current physical address applications call
 :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
 driver stores the physical address.

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-dqevent.rst

@@ -30,11 +30,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 CEC devices can send asynchronous events. These can be retrieved by
 calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
 non-blocking mode and no event is pending, then it will return -1 and

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-g-mode.rst

@@ -31,11 +31,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
 applications from stepping on each others toes it must be possible to
 obtain exclusive access to the CEC adapter. This ioctl sets the

+ 0 - 5
Documentation/media/uapi/cec/cec-ioc-receive.rst

@@ -34,11 +34,6 @@ Arguments
 Description
 ===========

-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To receive a CEC message the application has to fill in the
 ``timeout`` field of struct :c:type:`cec_msg` and pass it to
 :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.

+ 1 - 1
MAINTAINERS

@@ -13101,7 +13101,7 @@ F:	drivers/input/serio/userio.c
 F:	include/uapi/linux/userio.h

 VIRTIO CONSOLE DRIVER
-M:	Amit Shah <amit.shah@redhat.com>
+M:	Amit Shah <amit@kernel.org>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/char/virtio_console.c

+ 2 - 2
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote

 # *DOCUMENTATION*
@@ -797,7 +797,7 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)

 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif

+ 1 - 1
arch/arc/kernel/unaligned.c

@@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,

 	/* clear any remanants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta ~1U;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;

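The one-character fix above restores the bitwise AND that had been dropped: the branch target in bta carries a flag in bit 0 that must be cleared before the value is used as a return address. A minimal standalone sketch of the idiom (the address is an example value, not taken from the patch):

#include <stdio.h>

int main(void)
{
    unsigned long bta = 0x80001235UL;   /* example branch target, bit 0 set */

    /* '& ~1UL' clears the low bit, yielding the aligned address */
    printf("0x%lx\n", bta & ~1UL);      /* prints 0x80001234 */
    return 0;
}
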
+ 1 - 1
arch/powerpc/Kconfig

@@ -164,7 +164,6 @@ config PPC
 	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
-	select HAVE_CC_STACKPROTECTOR

 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
 	bool "Build a relocatable kernel"
 	depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 	select NONSTATIC_KERNEL
+	select MODULE_REL_CRCS if MODVERSIONS
 	help
 	  This builds a kernel image that is capable of running at the
 	  location the kernel is loaded at. For ppc32, there is no any

+ 2 - 0
arch/powerpc/include/asm/cpu_has_feature.h

@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	int i;

+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif

 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {

+ 2 - 0
arch/powerpc/include/asm/mmu.h

@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 	int i;

+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif

 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {

+ 0 - 4
arch/powerpc/include/asm/module.h

@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif

-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif	/* _ASM_POWERPC_MODULE_H */

+ 0 - 40
arch/powerpc/include/asm/stackprotector.h

@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function.  The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-	unsigned long canary;
-
-	/* Try to get a semi random initial value. */
-	get_random_bytes(&canary, sizeof(canary));
-	canary ^= mftb();
-	canary ^= LINUX_VERSION_CODE;
-
-	current->stack_canary = canary;
-	__stack_chk_guard = current->stack_canary;
-}
-
-#endif	/* _ASM_STACKPROTECTOR_H */

+ 0 - 4
arch/powerpc/kernel/Makefile

@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)

-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)

+ 0 - 3
arch/powerpc/kernel/asm-offsets.c

@@ -91,9 +91,6 @@ int main(void)
 	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif

-#ifdef CONFIG_CC_STACKPROTECTOR
-	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE

+ 1 - 1
arch/powerpc/kernel/eeh_driver.c

@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 	struct eeh_pe *pe = (struct eeh_pe *)data;
-	bool *clear_sw_state = flag;
+	bool clear_sw_state = *(bool *)flag;
 	int i, rc = 1;

 	for (i = 0; rc && i < 3; i++)

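The bug fixed here is a classic void-pointer mix-up in a traversal callback: the flag argument carries a bool by address, so it must be dereferenced, not aliased into a pointer that is then treated as the value. A standalone sketch of the corrected pattern (names are illustrative, not the driver code):

#include <stdbool.h>
#include <stdio.h>

/* Callback receiving an opaque flag, as the PE traversal helpers do. */
static void visit(void *data, void *flag)
{
    bool clear = *(bool *)flag; /* dereference; don't alias the pointer */

    printf("clear=%d\n", clear);
    (void)data;
}

int main(void)
{
    bool on = true;

    visit(NULL, &on);
    return 0;
}
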
+ 1 - 5
arch/powerpc/kernel/entry_32.S

@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	lwz	r0,TSK_STACK_CANARY(r2)
-	lis	r4,__stack_chk_guard@ha
-	stw	r0,__stack_chk_guard@l(r4)
-#endif
+
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
 	/* r3-r12 are destroyed -- Cort */

+ 0 - 8
arch/powerpc/kernel/module_64.c

@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
 	for (end = (void *)vers + size; vers < end; vers++)
 		if (vers->name[0] == '.') {
 			memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-			/* The TOC symbol has no CRC computed. To avoid CRC
-			 * check failing, we must force it to the expected
-			 * value (see CRC check in module.c).
-			 */
-			if (!strcmp(vers->name, "TOC."))
-				vers->crc = -(unsigned long)reloc_start;
-#endif
 		}
 }


+ 0 - 6
arch/powerpc/kernel/process.c

@@ -64,12 +64,6 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>

-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)

+ 3 - 0
arch/powerpc/kernel/prom_init.c

@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)

 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

+	if (!PHANDLE_VALID(cpu_pkg))
+		return;
+
 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 	prom.cpu = be32_to_cpu(rval);


+ 2 - 2
arch/powerpc/mm/pgtable-radix.c

@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		if (!pmdp)
 			return -ENOMEM;
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		}
 		pmdp = pmd_offset(pudp, ea);
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		if (!pmd_present(*pmdp)) {

+ 4 - 4
arch/x86/crypto/aesni-intel_glue.c

@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
 		    aesni_simd_skciphers[i]; i++)
 		simd_skcipher_free(aesni_simd_skciphers[i]);

-	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
-		    aesni_simd_skciphers2[i].simd; i++)
-		simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
+		if (aesni_simd_skciphers2[i].simd)
+			simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }

 static int __init aesni_init(void)
@@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
 		simd = simd_skcipher_create_compat(algname, drvname, basename);
 		err = PTR_ERR(simd);
 		if (IS_ERR(simd))
-			goto unregister_simds;
+			continue;

 		aesni_simd_skciphers2[i].simd = simd;
 	}

+ 26 - 34
arch/x86/events/intel/rapl.c

@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;

 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-	return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }

 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)

 	/* must be done before validate_group */
 	pmu = cpu_to_rapl_pmu(event->cpu);
+	if (!pmu)
+		return -EINVAL;
 	event->cpu = pmu->cpu;
 	event->pmu_private = pmu;
 	event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;

+	if (!pmu) {
+		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!pmu)
+			return -ENOMEM;
+
+		raw_spin_lock_init(&pmu->lock);
+		INIT_LIST_HEAD(&pmu->active_list);
+		pmu->pmu = &rapl_pmus->pmu;
+		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(pmu);
+
+		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+	}
+
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
 	return 0;
 }

-static int rapl_cpu_prepare(unsigned int cpu)
-{
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-	if (pmu)
-		return 0;
-
-	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-	if (!pmu)
-		return -ENOMEM;
-
-	raw_spin_lock_init(&pmu->lock);
-	INIT_LIST_HEAD(&pmu->active_list);
-	pmu->pmu = &rapl_pmus->pmu;
-	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-	pmu->cpu = -1;
-	rapl_hrtimer_init(pmu);
-	rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-	return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
 	/*
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
-
-	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-				rapl_cpu_prepare, NULL);
-	if (ret)
-		goto out;
-
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
 				"perf/x86/rapl:online",
 				rapl_cpu_online, rapl_cpu_offline);
 	if (ret)
-		goto out1;
+		goto out;

 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out2;
+		goto out1;

 	rapl_advertise();
 	return 0;

-out2:
-	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
 }

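The new cpu_to_rapl_pmu() (and the matching uncore_pmu_to_box() change below) relies on one idiom: storing topology_logical_package_id() in an unsigned variable makes the -1 returned for an unmapped CPU wrap to UINT_MAX, so a single '<' bound check rejects both out-of-range ids and the error value. A minimal sketch (the id function is a stand-in, not the kernel's topology map):

#include <stdio.h>

#define MAXPKG 4U

/* Returns -1 for CPUs with no package mapping, mimicking the topology map. */
static int logical_package_id(int cpu)
{
    return (cpu < 8) ? cpu / 4 : -1;
}

static const char *lookup(int cpu)
{
    unsigned int pkgid = logical_package_id(cpu);

    /* -1 wraps to UINT_MAX, so this one test also catches the error value */
    return pkgid < MAXPKG ? "valid" : "rejected";
}

int main(void)
{
    printf("cpu 2: %s\n", lookup(2));   /* valid */
    printf("cpu 9: %s\n", lookup(9));   /* rejected */
    return 0;
}
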
+ 91 - 141
arch/x86/events/intel/uncore.c

@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,

 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	return pmu->boxes[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }

 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 	pmu->registered = false;
 }

-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-	struct intel_uncore_pmu *pmu = type->pmus;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	if (pmu) {
-		pkg = topology_physical_package_id(cpu);
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box)
-				uncore_box_exit(box);
-		}
-	}
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-	struct intel_uncore_type **types;
-
-	for (types = uncore_msr_uncores; *types; types++)
-		__uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
 	int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
 	}
 }

-static int uncore_cpu_dying(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
-	return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg, ncpus = 1;
-
-	if (first_init) {
-		/*
-		 * On init we get the number of online cpus in the package
-		 * and set refcount for all of them.
-		 */
-		ncpus = cpumask_weight(topology_core_cpumask(cpu));
-	}
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (!box)
-				continue;
-			/* The first cpu on a package activates the box */
-			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-				uncore_box_init(box);
-		}
-	}
-
-	return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
-				continue;
-			/* First cpu of a package allocates the box */
-			box = uncore_alloc_box(type, cpu_to_node(cpu));
-			if (!box)
-				return -ENOMEM;
-			box->pmu = pmu;
-			box->pkgid = pkg;
-			pmu->boxes[pkg] = box;
-		}
-	}
-	return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 				   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,

 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, pkg, target;

 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return 0;
-
+		goto unref;
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)

 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+	/* Clear the references */
+	pkg = topology_logical_package_id(cpu);
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
 	return 0;
 	return 0;
 }

+			 unsigned int pkg, unsigned int cpu)
+{
+	struct intel_uncore_box *box, *tmp;
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu;
+	LIST_HEAD(allocated);
+	int i;
+
+	/* Try to allocate all required boxes */
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			if (pmu->boxes[pkg])
+				continue;
+			box = uncore_alloc_box(type, cpu_to_node(cpu));
+			if (!box)
+				goto cleanup;
+			box->pmu = pmu;
+			box->pkgid = pkg;
+			list_add(&box->active_list, &allocated);
+		}
+	}
+	/* Install them in the pmus */
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		box->pmu->boxes[pkg] = box;
+	}
+	return 0;
+
+cleanup:
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		kfree(box);
+	}
+	return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 static int uncore_event_cpu_online(unsigned int cpu)
 {
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, ret, pkg, target;
+
+	pkg = topology_logical_package_id(cpu);
+	ret = allocate_boxes(types, pkg, cpu);
+	if (ret)
+		return ret;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (!box && atomic_inc_return(&box->refcnt) == 1)
+				uncore_box_init(box);
+		}
+	}

 	/*
 	 * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;

-	/*
-	 * Install callbacks. Core will call them for each online cpu.
-	 *
-	 * The first online cpu of each package allocates and takes
-	 * the refcounts for all other online cpus in that package.
-	 * If msrs are not enabled no allocation is required and
-	 * uncore_cpu_prepare() is not called for each online cpu.
-	 */
-	if (!cret) {
-	       ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-				       "perf/x86/intel/uncore:prepare",
-				       uncore_cpu_prepare, NULL);
-		if (ret)
-			goto err;
-	} else {
-		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-					  "perf/x86/intel/uncore:prepare",
-					  uncore_cpu_prepare, NULL);
-	}
-	first_init = 1;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-			  "perf/x86/uncore:starting",
-			  uncore_cpu_starting, uncore_cpu_dying);
-	first_init = 0;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-			  "perf/x86/uncore:online",
-			  uncore_event_cpu_online, uncore_event_cpu_offline);
+	/* Install hotplug callbacks to setup the targets for each package */
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				"perf/x86/intel/uncore:online",
+				uncore_event_cpu_online,
+				uncore_event_cpu_offline);
+	if (ret)
+		goto err;
 	return 0;

 err:
-	/* Undo box->init_box() */
-	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 	return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);

 static void __exit intel_uncore_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 }

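The new allocate_boxes() above uses a two-phase allocate-then-install pattern: boxes are first collected on a local list, and only once every allocation has succeeded are they published into pmu->boxes[], so a mid-loop failure rolls back cleanly without leaving partially installed state. A generic sketch of the same pattern (simplified types and a plain linked list, not the driver code):

#include <stdio.h>
#include <stdlib.h>

struct box {
    struct box *next;   /* stand-in for the kernel's list_head */
    int id;
};

/* Allocate n boxes; publish into out[] only if every allocation succeeds. */
static int allocate_all(struct box **out, int n)
{
    struct box *pending = NULL, *b;
    int i;

    /* Phase 1: try to allocate everything onto a private list */
    for (i = 0; i < n; i++) {
        b = calloc(1, sizeof(*b));
        if (!b)
            goto cleanup;
        b->id = i;
        b->next = pending;
        pending = b;
    }
    /* Phase 2: installation cannot fail, so no partial state is visible */
    while (pending) {
        b = pending;
        pending = b->next;
        out[b->id] = b;
    }
    return 0;

cleanup:
    while (pending) {
        b = pending;
        pending = b->next;
        free(b);
    }
    return -1;
}

int main(void)
{
    struct box *boxes[4] = { NULL };

    if (allocate_all(boxes, 4) == 0)
        printf("installed 4 boxes\n");
    return 0;
}
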
+ 1 - 0
arch/x86/include/asm/microcode.h

@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void)			{ return 0; };
 static inline void __init load_ucode_bsp(void)			{ }

+ 2 - 0
arch/x86/kernel/apic/io_apic.c

@@ -2117,6 +2117,7 @@ static inline void __init check_timer(void)
 			if (idx != -1 && irq_trigger(idx))
 				unmask_ioapic_irq(irq_get_chip_data(0));
 		}
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		if (timer_irq_works()) {
 			if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@ static inline void __init check_timer(void)
 		 * legacy devices should be connected to IO APIC #0
 		 */
 		replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		legacy_pic->unmask(0);
 		if (timer_irq_works()) {

+ 12 - 19
arch/x86/kernel/cpu/mcheck/mce.c

@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)

 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;

-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
 	unsigned long when = jiffies + interval;
 	unsigned long flags;

 	local_irq_save(flags);

-	if (timer_pending(t)) {
-		if (time_before(when, t->expires))
-			mod_timer(t, when);
-	} else {
-		t->expires = round_jiffies(when);
-		add_timer_on(t, smp_processor_id());
-	}
+	if (!timer_pending(t) || time_before(when, t->expires))
+		mod_timer(t, round_jiffies(when));

 	local_irq_restore(flags);
 }
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)

 done:
 	__this_cpu_write(mce_next_interval, iv);
-	__restart_timer(t, iv);
+	__start_timer(t, iv);
 }

 /*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
 	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv = __this_cpu_read(mce_next_interval);

-	__restart_timer(t, interval);
+	__start_timer(t, interval);

 	if (interval < iv)
 		__this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
 	}
 }

-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
 	unsigned long iv = check_interval * HZ;

 	if (mca_cfg.ignore_ce || !iv)
 		return;

-	per_cpu(mce_next_interval, cpu) = iv;
-
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, cpu);
+	this_cpu_write(mce_next_interval, iv);
+	__start_timer(t, iv);
 }

 static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
 	unsigned int cpu = smp_processor_id();

 	setup_pinned_timer(t, mce_timer_fn, cpu);
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 }

 /* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)

 static int mce_cpu_online(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	int ret;

 	mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
 		return ret;
 	}
 	mce_reenable_cpu();
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 	return 0;
 }

 static int mce_cpu_pre_down(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);

 	mce_disable_cpu();
 	del_timer_sync(t);

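The rewritten __start_timer() above collapses the pending/not-pending branches into one mod_timer() call: mod_timer() arms an idle timer and reprograms a pending one, so the only condition left is whether the new deadline is earlier than the current one. A sketch of the "only pull the deadline in" idiom on plain integers (a simplified timer model, assuming a monotonically increasing jiffies-style clock):

#include <stdbool.h>
#include <stdio.h>

/* Simplified timer: a pending flag plus an absolute expiry time. */
struct simple_timer {
    bool pending;
    unsigned long expires;
};

/* Arm the timer, or move an armed timer earlier; never push it later. */
static void start_timer(struct simple_timer *t, unsigned long now,
                        unsigned long interval)
{
    unsigned long when = now + interval;

    if (!t->pending || when < t->expires) {
        t->expires = when;  /* mod_timer() does arm-or-reprogram in one call */
        t->pending = true;
    }
}

int main(void)
{
    struct simple_timer t = { false, 0 };

    start_timer(&t, 100, 50);   /* arms at 150 */
    start_timer(&t, 100, 20);   /* pulls the deadline in to 120 */
    start_timer(&t, 100, 80);   /* ignored: 180 is later */
    printf("expires at %lu\n", t.expires);  /* 120 */
    return 0;
}
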
+ 3 - 2
arch/x86/kernel/cpu/microcode/amd.c

@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
 reget:
 		if (!get_builtin_microcode(&cp, family)) {
 #ifdef CONFIG_BLK_DEV_INITRD
-			cp = find_cpio_data(ucode_path, (void *)initrd_start,
-					    initrd_end - initrd_start, NULL);
+			if (!initrd_gone)
+				cp = find_cpio_data(ucode_path, (void *)initrd_start,
+						    initrd_end - initrd_start, NULL);
 #endif
 			if (!(cp.data && cp.size)) {
 				/*

+ 17 - 5
arch/x86/kernel/cpu/microcode/core.c

@@ -46,6 +46,8 @@
 static struct microcode_ops	*microcode_ops;
 static bool dis_ucode_ldr = true;

+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);

 /*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
+	int ret = -EINVAL;

 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
 		if (c->x86 >= 6)
-			return save_microcode_in_initrd_intel();
+			ret = save_microcode_in_initrd_intel();
 		break;
 	case X86_VENDOR_AMD:
 		if (c->x86 >= 0x10)
-			return save_microcode_in_initrd_amd(c->x86);
+			ret = save_microcode_in_initrd_amd(c->x86);
 		break;
 	default:
 		break;
 	}

-	return -EINVAL;
+	initrd_gone = true;
+
+	return ret;
 }

 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 	 * has the virtual address of the beginning of the initrd. It also
 	 * possibly relocates the ramdisk. In either case, initrd_start contains
 	 * the updated address so use that instead.
+	 *
+	 * initrd_gone is for the hotplug case where we've thrown out initrd
+	 * already.
 	 */
-	if (!use_pa && initrd_start)
-		start = initrd_start;
+	if (!use_pa) {
+		if (initrd_gone)
+			return (struct cpio_data){ NULL, 0, "" };
+		if (initrd_start)
+			start = initrd_start;
+	}

 	return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */

+ 1 - 8
arch/x86/kernel/cpu/microcode/intel.c

@@ -41,7 +41,7 @@

 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;

 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
 	struct ucode_cpu_info uci;
 	struct cpio_data cp;

-	/*
-	 * AP loading didn't find any microcode patch, no need to save anything.
-	 */
-	if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-		return 0;
-
 	if (!load_builtin_intel_microcode(&cp))
 		cp = find_microcode_in_initrd(ucode_path, false);

@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
 	return 0;
 }

-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */

+ 3 - 1
arch/x86/kernel/fpu/core.c

@@ -9,6 +9,7 @@
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
 #include <asm/traps.h>

 #include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
 	 * it will #GP. Make sure it is replaced after the memset().
 	 */
 	if (static_cpu_has(X86_FEATURE_XSAVES))
-		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+					       xfeatures_mask;

 	if (static_cpu_has(X86_FEATURE_FXSR))
 		fpstate_init_fxstate(&state->fxsave);

+ 1 - 0
arch/x86/kernel/hpet.c

@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 	} else {
 		struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

+		irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
 		disable_irq(hdev->irq);
 		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));

+ 1 - 0
arch/x86/kvm/x86.c

@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 	memcpy(dest, xsave, XSAVE_HDR_OFFSET);

 	/* Set XSTATE_BV */
+	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
 	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;

 	/*

+ 16 - 0
arch/x86/platform/efi/efi_64.c

@@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)

 	efi_scratch.use_pgd = true;

+	/*
+	 * Certain firmware versions are way too sentimential and still believe
+	 * they are exclusive and unquestionable owners of the first physical page,
+	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+	 * (but then write-access it later during SetVirtualAddressMap()).
+	 *
+	 * Create a 1:1 mapping for this page, to avoid triple faults during early
+	 * boot with such firmware. We are free to hand this page to the BIOS,
+	 * as trim_bios_range() will reserve the first page and isolate it away
+	 * from memory allocators anyway.
+	 */
+	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+		pr_err("Failed to create 1:1 mapping for the first page!\n");
+		return 1;
+	}
+
 	/*
 	/*
 	 * When making calls to the firmware everything needs to be 1:1
 	 * When making calls to the firmware everything needs to be 1:1
 	 * mapped and addressable with 32-bit pointers. Map the kernel
 	 * mapped and addressable with 32-bit pointers. Map the kernel

+ 1 - 1
arch/xtensa/kernel/setup.c

@@ -419,7 +419,7 @@ subsys_initcall(topology_init);

 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
 	local_irq_disable();
 	/*
 	 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must

+ 1 - 1
crypto/algif_aead.c

@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 unlock:
 	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
 		af_alg_free_sg(&rsgl->sgl);
+		list_del(&rsgl->list);
 		if (rsgl != &ctx->first_rsgl)
 			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-		list_del(&rsgl->list);
 	}
 	INIT_LIST_HEAD(&ctx->list);
 	aead_wmem_wakeup(sk);

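The one-line move above matters because, in the old order, list_del(&rsgl->list) touched the entry after sock_kfree_s() had already released it: a use-after-free. Unlinking before the conditional free avoids that. A generic sketch of the safe order on a plain singly linked list (illustrative types, not the crypto code):

#include <stdlib.h>

struct node {
    struct node *next;
};

/* Free every node: read the link and unlink before the node is freed. */
static void free_all(struct node **head)
{
    struct node *n, *tmp;

    for (n = *head; n; n = tmp) {
        tmp = n->next;  /* save the link before freeing */
        n->next = NULL; /* unlink: no live pointer into freed memory */
        free(n);
    }
    *head = NULL;
}

int main(void)
{
    struct node *head = calloc(1, sizeof(*head));

    if (head)
        head->next = calloc(1, sizeof(struct node));
    free_all(&head);
    return 0;
}
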
+ 5 - 1
drivers/acpi/nfit/core.c

@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
 	struct device *dev = acpi_desc->dev;
 	struct acpi_nfit_flush_work flush;
+	int rc;

 	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
 	device_lock(dev);
@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 	INIT_WORK_ONSTACK(&flush.work, flush_probe);
 	COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
 	queue_work(nfit_wq, &flush.work);
-	return wait_for_completion_interruptible(&flush.cmp);
+
+	rc = wait_for_completion_interruptible(&flush.cmp);
+	cancel_work_sync(&flush.work);
+	return rc;
 }

 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,

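The added cancel_work_sync() closes a window that is specific to on-stack work items: if the interruptible wait returns early (for example on a signal), the function would otherwise unwind and release the stack frame while the queued work could still run. Cancelling, and synchronously waiting out a running instance, makes the early return safe. A reduced userspace sketch of the same lifetime rule, with a pthread standing in for the kernel workqueue (illustrative only; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct flush_work {
    pthread_t thread;   /* stands in for the queued work item */
    int done;
};

static void *flush_probe(void *arg)
{
    struct flush_work *w = arg;

    w->done = 1;        /* the actual flush would happen here */
    return NULL;
}

int main(void)
{
    struct flush_work flush = { 0 };    /* on-stack, like the kernel code */

    pthread_create(&flush.thread, NULL, flush_probe, &flush);

    /*
     * Even if the caller stops waiting early, it must synchronize with
     * the worker (cancel_work_sync() in the kernel, join here) before
     * the stack frame holding 'flush' goes away.
     */
    pthread_join(flush.thread, NULL);
    printf("done=%d\n", flush.done);
    return 0;
}
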
+ 1 - 4
drivers/base/firmware_class.c

@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 	struct firmware_buf *buf = fw_priv->buf;

 	__fw_load_abort(buf);
-
-	/* avoid user action after loading abort */
-	fw_priv->buf = NULL;
 }

 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,

 	mutex_lock(&fw_lock);
 	fw_buf = fw_priv->buf;
-	if (!fw_buf)
+	if (fw_state_is_aborted(&fw_buf->fw_st))
 		goto out;

 	switch (loading) {

+ 6 - 6
drivers/base/memory.c

@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
 	struct memory_block *mem = to_memory_block(dev);
 	unsigned long start_pfn, end_pfn;
+	unsigned long valid_start, valid_end, valid_pages;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-	struct page *first_page;
 	struct zone *zone;
 	int zone_shift = 0;

 	start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	end_pfn = start_pfn + nr_pages;
-	first_page = pfn_to_page(start_pfn);

 	/* The block contains more than one zone can not be offlined. */
-	if (!test_pages_in_a_zone(start_pfn, end_pfn))
+	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
 		return sprintf(buf, "none\n");

-	zone = page_zone(first_page);
+	zone = page_zone(pfn_to_page(valid_start));
+	valid_pages = valid_end - valid_start;

 	/* MMOP_ONLINE_KEEP */
 	sprintf(buf, "%s", zone->name);

 	/* MMOP_ONLINE_KERNEL */
-	zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+	zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
 	}

 	/* MMOP_ONLINE_MOVABLE */
-	zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+	zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);

+ 6 - 5
drivers/base/power/runtime.c

@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;

-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}

+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_idle(dev, rpmflags);
 	retval = rpm_idle(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	unsigned long flags;
 	unsigned long flags;
 	int retval;
 	int retval;
 
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 			return 0;
 	}
 	}
 
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_suspend(dev, rpmflags);
 	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 	unsigned long flags;
 	unsigned long flags;
 	int retval;
 	int retval;
 
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+			dev->power.runtime_status != RPM_ACTIVE);
 
 
 	if (rpmflags & RPM_GET_PUT)
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);
 		atomic_inc(&dev->power.usage_count);
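Note: moving might_sleep_if() below the RPM_GET_PUT fast path stops a warning that never matched reality: a put that merely drops the usage count returns before any sleeping code runs. Hedged usage sketch (illustrative):

	#include <linux/pm_runtime.h>

	static void example_put_from_atomic(struct device *dev)
	{
		/*
		 * With the reordering above, this no longer warns in atomic
		 * context when the usage count is known to stay above zero:
		 * the decrement returns early and the sleeping idle path is
		 * never reached.
		 */
		pm_runtime_put_sync(dev);
	}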

+ 14 - 3
drivers/cpufreq/brcmstb-avs-cpufreq.c

@@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy,
 static int brcm_avs_suspend(struct cpufreq_policy *policy)
 {
 	struct private_data *priv = policy->driver_data;
+	int ret;
+
+	ret = brcm_avs_get_pmap(priv, &priv->pmap);
+	if (ret)
+		return ret;
 
-	return brcm_avs_get_pmap(priv, &priv->pmap);
+	/*
+	 * We can't use the P-state returned by brcm_avs_get_pmap(), since
+	 * that's the initial P-state from when the P-map was downloaded to the
+	 * AVS co-processor, not necessarily the P-state we are running at now.
+	 * So, we get the current P-state explicitly.
+	 */
+	return brcm_avs_get_pstate(priv, &priv->pmap.state);
 }
 
 static int brcm_avs_resume(struct cpufreq_policy *policy)
@@ -954,9 +965,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
 	brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
 	brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
 
-	return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+	return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
 		pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
-		mdiv_p3, mdiv_p4);
+		mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
 }
 
 static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)

+ 30 - 0
drivers/cpufreq/intel_pstate.c

@@ -1235,6 +1235,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
 
+#define MSR_IA32_POWER_CTL_BIT_EE	19
+
+/* Disable energy efficiency optimization */
+static void intel_pstate_disable_ee(int cpu)
+{
+	u64 power_ctl;
+	int ret;
+
+	ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
+	if (ret)
+		return;
+
+	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
+		pr_info("Disabling energy efficiency optimization\n");
+		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
+	}
+}
+
 static int atom_get_min_pstate(void)
 {
 	u64 value;
@@ -1845,6 +1864,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	{}
 };
 
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+	{}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
 	struct cpudata *cpu;
@@ -1875,6 +1899,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu->cpu = cpunum;
 
 	if (hwp_active) {
+		const struct x86_cpu_id *id;
+
+		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+		if (id)
+			intel_pstate_disable_ee(cpunum);
+
 		intel_pstate_hwp_enable(cpu);
 		pid_params.sample_rate_ms = 50;
 		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
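Note: the quirk above is keyed off an x86_cpu_id table; x86_match_cpu() returns the first entry matching the running CPU's vendor/family/model, or NULL. A hedged sketch of the same idiom outside intel_pstate (table contents illustrative, driver_data omitted):

	#include <asm/cpu_device_id.h>
	#include <asm/intel-family.h>

	static const struct x86_cpu_id quirk_ids[] = {
		{ X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_DESKTOP },
		{}	/* all-zero terminator */
	};

	static bool needs_power_ctl_quirk(void)
	{
		return x86_match_cpu(quirk_ids) != NULL;
	}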

+ 1 - 1
drivers/crypto/ccp/ccp-dev-v5.c

@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
 static void ccp5_config(struct ccp_device *ccp)
 {
 	/* Public side */
-	iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+	iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
 }
 
 static void ccp5other_config(struct ccp_device *ccp)

+ 1 - 0
drivers/crypto/ccp/ccp-dev.h

@@ -238,6 +238,7 @@ struct ccp_dma_chan {
 	struct ccp_device *ccp;
 
 	spinlock_t lock;
+	struct list_head created;
 	struct list_head pending;
 	struct list_head active;
 	struct list_head complete;

+ 5 - 1
drivers/crypto/ccp/ccp-dmaengine.c

@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
 	ccp_free_desc_resources(chan->ccp, &chan->complete);
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
 	spin_lock_irqsave(&chan->lock, flags);
 
 	cookie = dma_cookie_assign(tx_desc);
+	list_del(&desc->entry);
 	list_add_tail(&desc->entry, &chan->pending);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
 
 	spin_lock_irqsave(&chan->lock, sflags);
 
-	list_add_tail(&desc->entry, &chan->pending);
+	list_add_tail(&desc->entry, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, sflags);
 
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
 	/*TODO: Purge the complete list? */
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 		chan->ccp = ccp;
 
 		spin_lock_init(&chan->lock);
+		INIT_LIST_HEAD(&chan->created);
 		INIT_LIST_HEAD(&chan->pending);
 		INIT_LIST_HEAD(&chan->active);
 		INIT_LIST_HEAD(&chan->complete);
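Note: the two ccp hunks give freshly created descriptors a list of their own, so a descriptor always sits on exactly one list and tx_submit() moves it from "created" to "pending" under the channel lock. A hedged sketch of that move (types illustrative):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct chan_lists {
		spinlock_t lock;
		struct list_head created;
		struct list_head pending;
	};

	struct xfer_desc {
		struct list_head entry;
	};

	static void submit_desc(struct chan_lists *chan, struct xfer_desc *d)
	{
		unsigned long flags;

		spin_lock_irqsave(&chan->lock, flags);
		/* list_move_tail() == list_del() followed by list_add_tail() */
		list_move_tail(&d->entry, &chan->pending);
		spin_unlock_irqrestore(&chan->lock, flags);
	}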

+ 28 - 25
drivers/crypto/chelsio/chcr_algo.c

@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 	case CRYPTO_ALG_TYPE_AEAD:
 		ctx_req.req.aead_req = (struct aead_request *)req;
 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
 		if (ctx_req.ctx.reqctx->skb) {
 			kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
 	unsigned int   kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
+
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
 		null = 1;
 		assoclen = 0;
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				  &sg_param))
 		goto dstmap_fail;
 
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
 	unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
 	sub_type = get_aead_subtype(tfm);
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
+
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err) {
 			pr_err("AAD copy to destination buffer fails\n");
 			return ERR_PTR(err);
 		}
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				  &sg_param))
 		goto dstmap_fail;
 
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
 
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return	ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
 
 	if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		crypt_len = AES_BLOCK_SIZE;
 	else
 		crypt_len = req->cryptlen;
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				  &sg_param))
 		goto dstmap_fail;
 
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
 	} else {
 		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
-		write_sg_to_skb(skb, &frags, dst, crypt_len);
+		write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
+
 	}
 
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	unsigned int ck_size;
 	int ret = 0, key_ctx_size = 0;
 
-	if (get_aead_subtype(aead) ==
-	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+	    keylen > 3) {
 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
 		memcpy(aeadctx->salt, key + keylen, 4);
 	}
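Note: across all three hunks the forwarded destination SG now lives in the per-request context, because scatterwalk_ffwd() may hand back a pointer into the two-entry array it is given; on-stack storage would be gone by the time the completion path calls dma_unmap_sg(). A hedged sketch of the lifetime rule (types illustrative):

	#include <crypto/scatterwalk.h>

	struct req_ctx {
		struct scatterlist dstffwd[2];	/* must outlive the request */
		struct scatterlist *dst;
	};

	static void save_dst(struct req_ctx *ctx, struct scatterlist *sg,
			     unsigned int assoclen)
	{
		/* may return &ctx->dstffwd[0] rather than a node of sg */
		ctx->dst = scatterwalk_ffwd(ctx->dstffwd, sg, assoclen);
	}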

+ 8 - 10
drivers/crypto/chelsio/chcr_core.c

@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
 int assign_chcr_device(struct chcr_dev **dev)
 {
 	struct uld_ctx *u_ctx;
+	int ret = -ENXIO;
 
 	/*
 	 * Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
 	 * must go to the same device to maintain the ordering.
 	 */
 	mutex_lock(&dev_mutex); /* TODO ? */
-	u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
-	if (!u_ctx) {
-		mutex_unlock(&dev_mutex);
-		return -ENXIO;
+	list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+		if (u_ctx && u_ctx->dev) {
+			*dev = u_ctx->dev;
+			ret = 0;
+			break;
 	}
-
-	*dev = u_ctx->dev;
 	mutex_unlock(&dev_mutex);
-	return 0;
+	return ret;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 
 static int __init chcr_crypto_init(void)
 {
-	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
 		pr_err("ULD register fail: No chcr crypto support in cxgb4");
-		return -1;
-	}
 
 	return 0;
 }

+ 3 - 0
drivers/crypto/chelsio/chcr_crypto.h

@@ -158,6 +158,9 @@ struct ablk_ctx {
 };
 struct chcr_aead_reqctx {
 	struct	sk_buff	*skb;
+	struct scatterlist *dst;
+	struct scatterlist srcffwd[2];
+	struct scatterlist dstffwd[2];
 	short int dst_nents;
 	u16 verify;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];

+ 1 - 1
drivers/crypto/qat/qat_c62x/adf_drv.c

@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			      &hw_data->accel_capabilities_mask);
 
 	/* Find and map all the device's BARS */
-	i = 0;
+	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
 			 ADF_PCI_MAX_BARS * 2) {

+ 1 - 0
drivers/crypto/qat/qat_common/adf_accel_devices.h

@@ -69,6 +69,7 @@
 #define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16

+ 2 - 2
drivers/crypto/qat/qat_common/qat_hal.c

@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
 	unsigned int csr_val;
 	int times = 30;
 
-	if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+	if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
 		return 0;
 
 	csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
 		(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
 				 LOCAL_TO_XFER_REG_OFFSET);
 	handle->pci_dev = pci_info->pci_dev;
-	if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+	if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
 		sram_bar =
 			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
 		handle->hal_sram_addr_v = sram_bar->virt_addr;

+ 3 - 11
drivers/firmware/efi/libstub/fdt.c

@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
 struct exit_boot_struct {
 	efi_memory_desc_t *runtime_map;
 	int *runtime_entry_count;
+	void *new_fdt_addr;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
 	efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
 			p->runtime_map, p->runtime_entry_count);
 
-	return EFI_SUCCESS;
+	return update_fdt_memmap(p->new_fdt_addr, map);
 }
 
 /*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
 	priv.runtime_map = runtime_map;
 	priv.runtime_entry_count = &runtime_entry_count;
+	priv.new_fdt_addr = (void *)*new_fdt_addr;
 	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
 					exit_boot_func);
 
 	if (status == EFI_SUCCESS) {
 		efi_set_virtual_address_map_t *svam;
 
-		status = update_fdt_memmap((void *)*new_fdt_addr, &map);
-		if (status != EFI_SUCCESS) {
-			/*
-			 * The kernel won't get far without the memory map, but
-			 * may still be able to print something meaningful so
-			 * return success here.
-			 */
-			return EFI_SUCCESS;
-		}
-
 		/* Install the new virtual address map */
 		svam = sys_table->runtime->set_virtual_address_map;
 		status = svam(runtime_entry_count * desc_size, desc_size,
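Note: moving update_fdt_memmap() into exit_boot_func() writes the memory map into the FDT at the one point where the final map is known, rather than after ExitBootServices() has already returned. A hedged sketch of the private-data callback shape used above (names and signature illustrative, not the EFI stub API):

	struct exit_ctx {
		void *new_fdt;
	};

	typedef int (*exit_cb_t)(void *priv, const void *final_map);

	static int my_exit_cb(void *priv, const void *final_map)
	{
		struct exit_ctx *p = priv;

		/* write_map_to_fdt() is hypothetical */
		return write_map_to_fdt(p->new_fdt, final_map);
	}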

+ 3 - 1
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	}
 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_set_vga_render_state(adev, false);
+
 	gmc_v6_0_mc_stop(adev, &save);
 
 	if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
 	gmc_v6_0_mc_resume(adev, &save);
-	amdgpu_display_set_vga_render_state(adev, false);
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)

+ 8 - 5
drivers/gpu/drm/drm_atomic.c

@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
 	}
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct drm_pending_vblank_event *event = crtc_state->event;
 		/*
-		 * TEST_ONLY and PAGE_FLIP_EVENT are mutually
-		 * exclusive, if they weren't, this code should be
-		 * called on success for TEST_ONLY too.
+		 * Free the allocated event. drm_atomic_helper_setup_commit
+		 * can allocate an event too, so only free it if it's ours
+		 * to prevent a double free in drm_atomic_state_clear.
 		 */
-		if (crtc_state->event)
-			drm_event_cancel_free(dev, &crtc_state->event->base);
+		if (event && (event->base.fence || event->base.file_priv)) {
+			drm_event_cancel_free(dev, &event->base);
+			crtc_state->event = NULL;
+		}
 	}
 
 	if (!fence_state)

+ 0 - 9
drivers/gpu/drm/drm_atomic_helper.c

@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
 		funcs = plane->helper_private;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-			continue;
-
 		if (funcs->prepare_fb) {
 			ret = funcs->prepare_fb(plane, plane_state);
 			if (ret)
@@ -1685,9 +1682,6 @@ fail:
 		if (j >= i)
 			continue;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-			continue;
-
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 	for_each_plane_in_state(old_state, plane, plane_state, i) {
 		const struct drm_plane_helper_funcs *funcs;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
-			continue;
-
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)

+ 18 - 5
drivers/gpu/drm/drm_connector.c

@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
 
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
+	mutex_init(&connector->mutex);
 	connector->edid_blob_ptr = NULL;
 	connector->status = connector_status_unknown;
 
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
 		connector->funcs->atomic_destroy_state(connector,
 						       connector->state);
 
+	mutex_destroy(&connector->mutex);
+
 	memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
  */
 int drm_connector_register(struct drm_connector *connector)
 {
-	int ret;
+	int ret = 0;
 
-	if (connector->registered)
+	if (!connector->dev->registered)
 		return 0;
 
+	mutex_lock(&connector->mutex);
+	if (connector->registered)
+		goto unlock;
+
 	ret = drm_sysfs_connector_add(connector);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	ret = drm_debugfs_connector_add(connector);
 	if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
 	drm_mode_object_register(connector->dev, &connector->base);
 
 	connector->registered = true;
-	return 0;
+	goto unlock;
 
 err_debugfs:
 	drm_debugfs_connector_remove(connector);
 err_sysfs:
 	drm_sysfs_connector_remove(connector);
+unlock:
+	mutex_unlock(&connector->mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
-	if (!connector->registered)
+	mutex_lock(&connector->mutex);
+	if (!connector->registered) {
+		mutex_unlock(&connector->mutex);
 		return;
+	}
 
 	if (connector->funcs->early_unregister)
 		connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
 	drm_debugfs_connector_remove(connector);
 
 	connector->registered = false;
+	mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);
 

+ 4 - 0
drivers/gpu/drm/drm_drv.c

@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto err_minors;
 
+	dev->registered = true;
+
 	if (dev->driver->load) {
 		ret = dev->driver->load(dev, flags);
 		if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
 
 	drm_lastclose(dev);
 
+	dev->registered = false;
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_modeset_unregister_all(dev);
 

+ 4 - 12
drivers/gpu/drm/i915/i915_drv.h

@@ -1012,6 +1012,8 @@ struct intel_fbc {
 	struct work_struct underrun_work;
 
 	struct intel_fbc_state_cache {
+		struct i915_vma *vma;
+
 		struct {
 			unsigned int mode_flags;
 			uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
 		} plane;
 
 		struct {
-			u64 ilk_ggtt_offset;
 			uint32_t pixel_format;
 			unsigned int stride;
-			int fence_reg;
-			unsigned int tiling_mode;
 		} fb;
 	} state_cache;
 
 	struct intel_fbc_reg_params {
+		struct i915_vma *vma;
+
 		struct {
 			enum pipe pipe;
 			enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
 		} crtc;
 
 		struct {
-			u64 ggtt_offset;
 			uint32_t pixel_format;
 			unsigned int stride;
-			int fence_reg;
 		} fb;
 
 		int cfb_size;
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
 	return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
-			    const struct i915_ggtt_view *view)
-{
-	return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
 /* i915_gem_fence_reg.c */
 int __must_check i915_vma_get_fence(struct i915_vma *vma);
 int __must_check i915_vma_put_fence(struct i915_vma *vma);

+ 20 - 0
drivers/gpu/drm/i915/intel_atomic_plane.c

@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 
 	__drm_atomic_helper_plane_duplicate_state(plane, state);
 
+	intel_state->vma = NULL;
+
 	return state;
 }
 
@@ -100,6 +102,24 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
 			  struct drm_plane_state *state)
 {
+	struct i915_vma *vma;
+
+	vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+	/*
+	 * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+	 * We currently don't clear all planes during driver unload, so we have
+	 * to be able to unpin vma here for now.
+	 *
+	 * Normally this can only happen during unload when kmscon is disabled
+	 * and userspace doesn't attempt to set a framebuffer at all.
+	 */
+	if (vma) {
+		mutex_lock(&plane->dev->struct_mutex);
+		intel_unpin_fb_vma(vma);
+		mutex_unlock(&plane->dev->struct_mutex);
+	}
+
 	drm_atomic_helper_plane_destroy_state(plane, state);
 }
 
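Note: fetch_and_zero() is an i915 helper that reads a location and clears it in one expression, so exactly one caller ends up owning (and unpinning) the vma. Approximate definition for reference (paraphrased from the i915 headers):

	#define fetch_and_zero(ptr) ({			\
		typeof(*(ptr)) __v = *(ptr);		\
		*(ptr) = (typeof(*(ptr)))0;		\
		__v;					\
	})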

+ 42 - 83
drivers/gpu/drm/i915/intel_display.c

@@ -2235,27 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 			i915_vma_pin_fence(vma);
 	}
 
+	i915_vma_get(vma);
 err:
 	intel_runtime_pm_put(dev_priv);
 	return vma;
 }
 
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct i915_ggtt_view view;
-	struct i915_vma *vma;
-
-	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
-	intel_fill_fb_ggtt_view(&view, fb, rotation);
-	vma = i915_gem_object_to_ggtt(obj, &view);
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
 
 	if (WARN_ON_ONCE(!vma))
 		return;
 
 	i915_vma_unpin_fence(vma);
 	i915_gem_object_unpin_from_display_plane(vma);
+	i915_vma_put(vma);
 }
 
 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2750,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *c;
-	struct intel_crtc *i;
 	struct drm_i915_gem_object *obj;
 	struct drm_plane *primary = intel_crtc->base.primary;
 	struct drm_plane_state *plane_state = primary->state;
@@ -2775,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	 * an fb with another CRTC instead
 	 */
 	for_each_crtc(dev, c) {
-		i = to_intel_crtc(c);
+		struct intel_plane_state *state;
 
 		if (c == &intel_crtc->base)
 			continue;
 
-		if (!i->active)
+		if (!to_intel_crtc(c)->active)
 			continue;
 
-		fb = c->primary->fb;
-		if (!fb)
+		state = to_intel_plane_state(c->primary->state);
+		if (!state->vma)
 			continue;
 
-		obj = intel_fb_obj(fb);
-		if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+		if (intel_plane_ggtt_offset(state) == plane_config->base) {
+			fb = c->primary->fb;
 			drm_framebuffer_reference(fb);
 			goto valid_fb;
 		}
@@ -2809,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	return;
 
 valid_fb:
+	mutex_lock(&dev->struct_mutex);
+	intel_state->vma =
+		intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR(intel_state->vma)) {
+		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+			  intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+		intel_state->vma = NULL;
+		drm_framebuffer_unreference(fb);
+		return;
+	}
+
 	plane_state->src_x = 0;
 	plane_state->src_y = 0;
 	plane_state->src_w = fb->width << 16;
@@ -3104,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_GEN(dev_priv) >= 4) {
 		I915_WRITE(DSPSURF(plane),
-			   intel_fb_gtt_offset(fb, rotation) +
+			   intel_plane_ggtt_offset(plane_state) +
 			   intel_crtc->dspaddr_offset);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE(DSPLINOFF(plane), linear_offset);
 	} else {
 		I915_WRITE(DSPADDR(plane),
-			   intel_fb_gtt_offset(fb, rotation) +
+			   intel_plane_ggtt_offset(plane_state) +
 			   intel_crtc->dspaddr_offset);
 	}
 	POSTING_READ(reg);
@@ -3207,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	I915_WRITE(DSPSURF(plane),
-		   intel_fb_gtt_offset(fb, rotation) +
+		   intel_plane_ggtt_offset(plane_state) +
 		   intel_crtc->dspaddr_offset);
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3230,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
 	}
 }
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
-			unsigned int rotation)
-{
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct i915_ggtt_view view;
-	struct i915_vma *vma;
-
-	intel_fill_fb_ggtt_view(&view, fb, rotation);
-
-	vma = i915_gem_object_to_ggtt(obj, &view);
-	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-		 view.type))
-		return -1;
-
-	return i915_ggtt_offset(vma);
-}
-
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
@@ -3441,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	}
 
 	I915_WRITE(PLANE_SURF(pipe, 0),
-		   intel_fb_gtt_offset(fb, rotation) + surf_addr);
+		   intel_plane_ggtt_offset(plane_state) + surf_addr);
 
 	POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -11536,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 		flush_work(&work->mmio_work);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+	intel_unpin_fb_vma(work->old_vma);
 	i915_gem_object_put(work->pending_flip_obj);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -12246,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup_pending;
 	}
 
-	work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
-	work->gtt_offset += intel_crtc->dspaddr_offset;
+	work->old_vma = to_intel_plane_state(primary->state)->vma;
+	to_intel_plane_state(primary->state)->vma = vma;
+
+	work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
 	work->rotation = crtc->primary->state->rotation;
 
 	/*
@@ -12301,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_request:
 	i915_add_request_no_flush(request);
 cleanup_unpin:
-	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+	to_intel_plane_state(primary->state)->vma = work->old_vma;
+	intel_unpin_fb_vma(vma);
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
 unlock:
@@ -14794,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 			DRM_DEBUG_KMS("failed to pin object\n");
 			return PTR_ERR(vma);
 		}
+
+		to_intel_plane_state(new_state)->vma = vma;
 	}
 
 	return 0;
@@ -14812,19 +14807,12 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
 		       struct drm_plane_state *old_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane->dev);
-	struct intel_plane_state *old_intel_state;
-	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
-	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
-	old_intel_state = to_intel_plane_state(old_state);
-
-	if (!obj && !old_obj)
-		return;
+	struct i915_vma *vma;
 
-	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
-	    !INTEL_INFO(dev_priv)->cursor_needs_physical))
-		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+	/* Should only be called after a successful intel_prepare_plane_fb()! */
+	vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+	if (vma)
+		intel_unpin_fb_vma(vma);
 }
 
 int
@@ -15166,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
 	if (!obj)
 		addr = 0;
 	else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
-		addr = i915_gem_object_ggtt_offset(obj, NULL);
+		addr = intel_plane_ggtt_offset(state);
 	else
 		addr = obj->phys_handle->busaddr;
 
@@ -17066,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev)
 void intel_modeset_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *c;
-	struct drm_i915_gem_object *obj;
 
 	intel_init_gt_powersave(dev_priv);
 
 	intel_modeset_init_hw(dev);
 
 	intel_setup_overlay(dev_priv);
-
-	/*
-	 * Make sure any fbs we allocated at startup are properly
-	 * pinned & fenced.  When we do the allocation it's too early
-	 * for this.
-	 */
-	for_each_crtc(dev, c) {
-		struct i915_vma *vma;
-
-		obj = intel_fb_obj(c->primary->fb);
-		if (obj == NULL)
-			continue;
-
-		mutex_lock(&dev->struct_mutex);
-		vma = intel_pin_and_fence_fb_obj(c->primary->fb,
-						 c->primary->state->rotation);
-		mutex_unlock(&dev->struct_mutex);
-		if (IS_ERR(vma)) {
-			DRM_ERROR("failed to pin boot fb on pipe %d\n",
-				  to_intel_crtc(c)->pipe);
-			drm_framebuffer_unreference(c->primary->fb);
-			c->primary->fb = NULL;
-			c->primary->crtc = c->primary->state->crtc = NULL;
-			update_state_fb(c->primary);
-			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
-		}
-	}
 }
 
 int intel_connector_register(struct drm_connector *connector)
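Note: the intel_display hunks move framebuffer pinning to a reference-counted vma stored in the plane state: intel_pin_and_fence_fb_obj() now takes an extra reference via i915_vma_get(), and intel_unpin_fb_vma() drops it via i915_vma_put() after unpinning, so whoever holds plane_state->vma owns exactly one pin and one reference. Hedged shape of the hand-off in a page flip (simplified from the hunk above):

	/* pin the new vma, remember the old one; the unpin work
	 * releases the old vma once the flip has completed */
	work->old_vma = to_intel_plane_state(primary->state)->vma;
	to_intel_plane_state(primary->state)->vma = vma;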

+ 7 - 2
drivers/gpu/drm/i915/intel_drv.h

@@ -377,6 +377,7 @@ struct intel_atomic_state {
 struct intel_plane_state {
 	struct drm_plane_state base;
 	struct drm_rect clip;
+	struct i915_vma *vma;
 
 	struct {
 		u32 offset;
@@ -1046,6 +1047,7 @@ struct intel_flip_work {
 	struct work_struct mmio_work;
 
 	struct drm_crtc *crtc;
+	struct i915_vma *old_vma;
 	struct drm_framebuffer *old_fb;
 	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
@@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct drm_modeset_acquire_ctx *ctx);
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+	return i915_ggtt_offset(state->vma);
+}
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);

+ 20 - 32
drivers/gpu/drm/i915/intel_fbc.c

@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 	if (IS_I945GM(dev_priv))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-	fbc_ctl |= params->fb.fence_reg;
+	fbc_ctl |= params->vma->fence->id;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
 	else
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
-		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+	if (params->vma->fence) {
+		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
 		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
 	} else {
 		I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+	if (params->vma->fence) {
 		dpfc_ctl |= DPFC_CTL_FENCE_EN;
 		if (IS_GEN5(dev_priv))
-			dpfc_ctl |= params->fb.fence_reg;
+			dpfc_ctl |= params->vma->fence->id;
 		if (IS_GEN6(dev_priv)) {
 			I915_WRITE(SNB_DPFC_CTL_SA,
-				   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+				   SNB_CPU_FENCE_ENABLE |
+				   params->vma->fence->id);
 			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
 				   params->crtc.fence_y_offset);
 		}
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 	}
 
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-	I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+	I915_WRITE(ILK_FBC_RT_BASE,
+		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+	if (params->vma->fence) {
 		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 		I915_WRITE(SNB_DPFC_CTL_SA,
-			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+			   SNB_CPU_FENCE_ENABLE |
+			   params->vma->fence->id);
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
 	} else {
 		I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 	return effective_w <= max_w && effective_h <= max_h;
 }
 
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
-	struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
-	return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 					 struct intel_crtc_state *crtc_state,
 					 struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_fbc_state_cache *cache = &fbc->state_cache;
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj;
+
+	cache->vma = NULL;
 
 	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	if (!cache->plane.visible)
 		return;
 
-	obj = intel_fb_obj(fb);
-
-	/* FIXME: We lack the proper locking here, so only run this on the
-	 * platforms that need. */
-	if (IS_GEN(dev_priv, 5, 6))
-		cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
 	cache->fb.pixel_format = fb->pixel_format;
 	cache->fb.stride = fb->pitches[0];
-	cache->fb.fence_reg = get_fence_id(fb);
-	cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+	cache->vma = plane_state->vma;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 		return false;
 	}
 
-	if (!cache->plane.visible) {
+	if (!cache->vma) {
 		fbc->no_fbc_reason = "primary plane not visible";
 		return false;
 	}
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	 * so have no fence associated with it) due to aperture constaints
 	 * at the time of pinning.
 	 */
-	if (cache->fb.tiling_mode != I915_TILING_X ||
-	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+	if (!cache->vma->fence) {
 		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
 		return false;
 	}
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
 	 * zero. */
 	memset(params, 0, sizeof(*params));
 
+	params->vma = cache->vma;
+
 	params->crtc.pipe = crtc->pipe;
 	params->crtc.plane = crtc->plane;
 	params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
 
 	params->fb.pixel_format = cache->fb.pixel_format;
 	params->fb.stride = cache->fb.stride;
-	params->fb.fence_reg = cache->fb.fence_reg;
 
 	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
-	params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
 }
 
 static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,

+ 2 - 2
drivers/gpu/drm/i915/intel_fbdev.c

@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unpin:
-	intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+	intel_unpin_fb_vma(vma);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
 	if (ifbdev->fb) {
 		mutex_lock(&ifbdev->helper.dev->struct_mutex);
-		intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+		intel_unpin_fb_vma(ifbdev->vma);
 		mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
 		drm_framebuffer_remove(&ifbdev->fb->base);

+ 4 - 4
drivers/gpu/drm/i915/intel_sprite.c

@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 
 	I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
 	I915_WRITE(PLANE_SURF(pipe, plane),
-		   intel_fb_gtt_offset(fb, rotation) + surf_addr);
+		   intel_plane_ggtt_offset(plane_state) + surf_addr);
 	POSTING_READ(PLANE_SURF(pipe, plane));
 }
 
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
 	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
 	I915_WRITE(SPCNTR(pipe, plane), sprctl);
 	I915_WRITE(SPSURF(pipe, plane),
-		   intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+		   intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 	POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
 		I915_WRITE(SPRSCALE(pipe), sprscale);
 	I915_WRITE(SPRCTL(pipe), sprctl);
 	I915_WRITE(SPRSURF(pipe),
-		   intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+		   intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 	POSTING_READ(SPRSURF(pipe));
 }
 
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
 	I915_WRITE(DVSSCALE(pipe), dvsscale);
 	I915_WRITE(DVSCNTR(pipe), dvscntr);
 	I915_WRITE(DVSSURF(pipe),
-		   intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
+		   intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
 	POSTING_READ(DVSSURF(pipe));
 }
 

+ 2 - 1
drivers/gpu/drm/nouveau/dispnv04/hw.c

@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 		uint32_t mpllP;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+		mpllP = (mpllP >> 8) & 0xf;
 		if (!mpllP)
 			mpllP = 4;
 
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 		uint32_t clock;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
-		return clock;
+		return clock / 1000;
 	}
 
 	ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
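
Two distinct fixes here: the divider lives in a 4-bit field at bits 11:8 of the config dword, so it must be shifted and masked before the zero test (the old code tested the whole dword, which is almost never zero), and the raw clock readout is divided by 1000 to match the units the callers expect. The field extraction in isolation, with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    /* Extract the 4-bit divider at bits 11:8, falling back to the
     * hardware default of 4 when the field itself is zero. */
    static unsigned int mpll_p(uint32_t dword)
    {
        unsigned int p = (dword >> 8) & 0xf;

        return p ? p : 4;
    }

    int main(void)
    {
        printf("%u\n", mpll_p(0x00000300));  /* field = 3            */
        printf("%u\n", mpll_p(0x00abc0ff));  /* field = 0, so 4; the
                                                old whole-dword test
                                                would not have fired */
        return 0;
    }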

+ 1 - 0
drivers/gpu/drm/nouveau/nouveau_fence.h

@@ -99,6 +99,7 @@ struct nv84_fence_priv {
 	struct nouveau_bo *bo;
 	struct nouveau_bo *bo_gart;
 	u32 *suspend;
+	struct mutex mutex;
 };
 
 int  nv84_fence_context_new(struct nouveau_channel *);

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_led.h

@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
 }
 
 /* nouveau_led.c */
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
 int  nouveau_led_init(struct drm_device *dev);
 void nouveau_led_suspend(struct drm_device *dev);
 void nouveau_led_resume(struct drm_device *dev);

+ 2 - 1
drivers/gpu/drm/nouveau/nouveau_usif.c

@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
 	if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
 		/* block access to objects not created via this interface */
 		owner = argv->v0.owner;
-		if (argv->v0.object == 0ULL)
+		if (argv->v0.object == 0ULL &&
+		    argv->v0.type != NVIF_IOCTL_V0_DEL)
 			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
 		else
 			argv->v0.owner = NVDRM_OBJECT_USIF;

+ 6 - 0
drivers/gpu/drm/nouveau/nv50_display.c

@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	}
 
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (crtc->state->event)
+			drm_crtc_vblank_get(crtc);
+	}
+
 	/* Update plane(s). */
 	for_each_plane_in_state(state, plane, plane_state, i) {
 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 			drm_crtc_send_vblank_event(crtc, crtc->state->event);
 			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 			crtc->state->event = NULL;
+			drm_crtc_vblank_put(crtc);
 		}
 	}
 

+ 6 - 0
drivers/gpu/drm/nouveau/nv84_fence.c

@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 	struct nv84_fence_chan *fctx = chan->fence;
 
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+	mutex_lock(&priv->mutex);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	mutex_unlock(&priv->mutex);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
 	nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.sync32 = nv84_fence_sync32;
 	fctx->base.sequence = nv84_fence_read(chan);
 
+	mutex_lock(&priv->mutex);
 	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
 	if (ret == 0) {
 		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
 					&fctx->vma_gart);
 	}
+	mutex_unlock(&priv->mutex);
 
 	if (ret)
 		nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
+	mutex_init(&priv->mutex);
+
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
 	domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
 			 /*
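
The shape of the locking added here: per-channel VMA creation and teardown on the shared fence buffer now serialize on one mutex, initialized once at create time, so two channels cannot mutate the buffer's mapping list concurrently. A userspace sketch of the same shape, with pthreads standing in for the kernel mutex API and a counter standing in for the VMA list:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t bo_lock = PTHREAD_MUTEX_INITIALIZER;
    static int vma_count;                  /* stand-in for the BO's VMA list */

    static void context_new(void)
    {
        pthread_mutex_lock(&bo_lock);
        vma_count++;                       /* like nouveau_bo_vma_add() */
        pthread_mutex_unlock(&bo_lock);
    }

    static void context_del(void)
    {
        pthread_mutex_lock(&bo_lock);
        vma_count--;                       /* like nouveau_bo_vma_del() */
        pthread_mutex_unlock(&bo_lock);
    }

    int main(void)
    {
        context_new();
        context_del();
        printf("vma_count=%d\n", vma_count);
        return 0;
    }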

+ 1 - 1
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c

@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
 			);
 		}
 		for (i = 0; i < size; i++)
-			nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+			nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
 		for (; i < 0x60; i++)
 			nvkm_wr32(device, 0x61c440 + soff, (i << 8));
 		nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
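
This one-character fix is a classic copy-loop bug: every iteration wrote byte 0 of the buffer instead of byte i, so the hardware received the first byte repeated. Reduced to a standalone program:

    #include <stdio.h>

    int main(void)
    {
        unsigned char src[4] = { 0xde, 0xad, 0xbe, 0xef };
        unsigned char bad[4], good[4];

        for (int i = 0; i < 4; i++) {
            bad[i]  = src[0];   /* old code: byte 0 repeated */
            good[i] = src[i];   /* fixed: each byte copied   */
        }
        printf("bad:  %02x %02x %02x %02x\n", bad[0], bad[1], bad[2], bad[3]);
        printf("good: %02x %02x %02x %02x\n", good[0], good[1], good[2], good[3]);
        return 0;
    }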

+ 0 - 2
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c

@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
 	case 0x94:
 	case 0x96:
 	case 0x98:
-	case 0xaa:
-	case 0xac:
 		return true;
 	default:
 		break;

+ 2 - 1
drivers/gpu/drm/radeon/radeon_drv.c

@@ -97,9 +97,10 @@
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  *   2.47.0 - Add UVD_NO_OP register support
  *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ *   2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	48
+#define KMS_DRIVER_MINOR	49
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);

+ 2 - 2
drivers/gpu/drm/radeon/radeon_gem.c

@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
-	args->vram_size = rdev->mc.real_vram_size;
-	args->vram_visible = (u64)man->size << PAGE_SHIFT;
+	args->vram_size = (u64)man->size << PAGE_SHIFT;
+	args->vram_visible = rdev->mc.visible_vram_size;
 	args->vram_visible -= rdev->vram_pin_size;
 	args->gart_size = rdev->mc.gtt_size;
 	args->gart_size -= rdev->gart_pin_size;

+ 1 - 0
drivers/hv/ring_buffer.c

@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 		return ret;
 	}
 
+	init_cached_read_index(channel);
 	next_read_location = hv_get_next_read_location(inring_info);
 	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
 						    sizeof(desc),

+ 2 - 2
drivers/iio/adc/palmas_gpadc.c

@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
 
 static int palmas_gpadc_suspend(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct palmas_gpadc *adc = iio_priv(indio_dev);
 	int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
 	int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
 
 static int palmas_gpadc_resume(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct palmas_gpadc *adc = iio_priv(indio_dev);
 	int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
 	int ret;

+ 2 - 2
drivers/iio/health/afe4403.c

@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
 
 static int __maybe_unused afe4403_suspend(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
 	struct afe4403_data *afe = iio_priv(indio_dev);
 	int ret;
 
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
 
 static int __maybe_unused afe4403_resume(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
 	struct afe4403_data *afe = iio_priv(indio_dev);
 	int ret;
 

+ 2 - 2
drivers/iio/health/afe4404.c

@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
 
 static int __maybe_unused afe4404_suspend(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
 	struct afe4404_data *afe = iio_priv(indio_dev);
 	int ret;
 
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
 
 static int __maybe_unused afe4404_resume(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
 	struct afe4404_data *afe = iio_priv(indio_dev);
 	int ret;
 

+ 1 - 1
drivers/iio/health/max30100.c

@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
 
 	mutex_lock(&data->lock);
 
-	while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
+	while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
 		ret = max30100_read_measurement(data);
 		if (ret)
 			break;
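
The one-parenthesis fix above is an operator-precedence bug: `>` binds tighter than `=`, so the old condition assigned the result of the comparison (0 or 1) to cnt instead of the FIFO count. Reduced to a standalone case:

    #include <stdio.h>

    static int fifo_count(void) { return 5; }

    int main(void)
    {
        int cnt;

        cnt = fifo_count() > 0;             /* old: cnt = (5 > 0) = 1 */
        printf("old: cnt=%d\n", cnt);

        int ok = (cnt = fifo_count()) > 0;  /* new: cnt = 5, ok = 1   */
        printf("new: cnt=%d ok=%d\n", cnt, ok);
        return 0;
    }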

+ 4 - 2
drivers/iio/humidity/dht11.c

@@ -71,7 +71,8 @@
  * a) select an implementation using busy loop polling on those systems
  * b) use the checksum to do some probabilistic decoding
  */
-#define DHT11_START_TRANSMISSION	18  /* ms */
+#define DHT11_START_TRANSMISSION_MIN	18000  /* us */
+#define DHT11_START_TRANSMISSION_MAX	20000  /* us */
 #define DHT11_MIN_TIMERES	34000  /* ns */
 #define DHT11_THRESHOLD		49000  /* ns */
 #define DHT11_AMBIG_LOW		23000  /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
 		ret = gpio_direction_output(dht11->gpio, 0);
 		if (ret)
 			goto err;
-		msleep(DHT11_START_TRANSMISSION);
+		usleep_range(DHT11_START_TRANSMISSION_MIN,
+			     DHT11_START_TRANSMISSION_MAX);
 		ret = gpio_direction_input(dht11->gpio);
 		if (ret)
 			goto err;

+ 4 - 4
drivers/md/dm-crypt.c

@@ -1534,18 +1534,18 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 		return PTR_ERR(key);
 	}
 
-	rcu_read_lock();
+	down_read(&key->sem);
 
 	ukp = user_key_payload(key);
 	if (!ukp) {
-		rcu_read_unlock();
+		up_read(&key->sem);
 		key_put(key);
 		kzfree(new_key_string);
 		return -EKEYREVOKED;
 	}
 
 	if (cc->key_size != ukp->datalen) {
-		rcu_read_unlock();
+		up_read(&key->sem);
 		key_put(key);
 		kzfree(new_key_string);
 		return -EINVAL;
@@ -1553,7 +1553,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 
 	memcpy(cc->key, ukp->data, cc->key_size);
 
-	rcu_read_unlock();
+	up_read(&key->sem);
 	key_put(key);
 
 	/* clear the flag since following operations may invalidate previously valid key */
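
The substitution here swaps an RCU read-side section, which must not sleep, for a sleepable read lock on the key's semaphore around the payload copy. A userspace analogue of the read-side shape using a pthread rwlock; names and sizes are illustrative:

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_rwlock_t key_sem = PTHREAD_RWLOCK_INITIALIZER;
    static char key_payload[32] = "s3kr1t";

    /* Copy the payload under a read lock: concurrent readers are fine,
     * a writer updating the payload is excluded, and unlike an RCU
     * read-side section the critical region may block. */
    static void copy_key(char *dst, size_t len)
    {
        pthread_rwlock_rdlock(&key_sem);   /* like down_read(&key->sem) */
        memcpy(dst, key_payload, len);
        pthread_rwlock_unlock(&key_sem);   /* like up_read(&key->sem)   */
    }

    int main(void)
    {
        char buf[32];

        copy_key(buf, sizeof(buf));
        printf("%s\n", buf);
        return 0;
    }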

+ 2 - 2
drivers/md/dm-mpath.c

@@ -427,7 +427,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	unsigned long flags;
 	struct priority_group *pg;
 	struct pgpath *pgpath;
-	bool bypassed = true;
+	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
@@ -466,7 +466,7 @@ check_current_pg:
 	 */
 	do {
 		list_for_each_entry(pg, &m->priority_groups, list) {
-			if (pg->bypassed == bypassed)
+			if (pg->bypassed == !!bypassed)
 				continue;
 			pgpath = choose_path_in_pg(m, pg, nr_bytes);
 			if (!IS_ERR_OR_NULL(pgpath)) {
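
With the loop counter now a plain unsigned rather than a bool, the `!!` idiom guards the comparison against a boolean field: it collapses any non-zero value to exactly 1 before the equality test. The pitfall in isolation, with made-up values:

    #include <stdio.h>

    int main(void)
    {
        unsigned bypassed = 2;   /* non-zero, but not 1 */
        unsigned flag = 1;       /* single-bit/boolean field */

        printf("raw:        %d\n", flag == bypassed);    /* 0: wrong */
        printf("normalized: %d\n", flag == !!bypassed);  /* 1: right */
        return 0;
    }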

+ 4 - 0
drivers/md/dm-rq.c

@@ -779,6 +779,10 @@ static void dm_old_request_fn(struct request_queue *q)
 		int srcu_idx;
 		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 
+		if (unlikely(!map)) {
+			dm_put_live_table(md, srcu_idx);
+			return;
+		}
 		ti = dm_table_find_target(map, pos);
 		dm_put_live_table(md, srcu_idx);
 	}

+ 1 - 1
drivers/media/cec/cec-adap.c

@@ -1206,7 +1206,7 @@ static int cec_config_thread_func(void *arg)
 		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
 		if (last_la == CEC_LOG_ADDR_INVALID ||
 		    last_la == CEC_LOG_ADDR_UNREGISTERED ||
-		    !(last_la & type2mask[type]))
+		    !((1 << last_la) & type2mask[type]))
 			last_la = la_list[0];
 
 		err = cec_config_log_addr(adap, i, last_la);
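
The fix turns a value-vs-mask AND into a proper membership test: the logical address is a bit index, so it has to be converted to a bit with `1 << la` before testing it against the per-type mask. In isolation, with an example mask:

    #include <stdio.h>

    #define TYPE_MASK  ((1 << 4) | (1 << 8))  /* example: addresses 4 and 8 */

    int main(void)
    {
        int la = 8;

        printf("old (wrong): %d\n", !!(la & TYPE_MASK));         /* ANDs the value 8 */
        printf("new (right): %d\n", !!((1 << la) & TYPE_MASK));  /* tests bit 8      */
        return 0;
    }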

+ 2 - 1
drivers/mmc/host/sdhci.c

@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
 		if (intmask & SDHCI_INT_RETUNE)
 			mmc_retune_needed(host->mmc);
 
-		if (intmask & SDHCI_INT_CARD_INT) {
+		if ((intmask & SDHCI_INT_CARD_INT) &&
+		    (host->ier & SDHCI_INT_CARD_INT)) {
 			sdhci_enable_sdio_irq_nolock(host, false);
 			host->thread_isr |= SDHCI_INT_CARD_INT;
 			result = IRQ_WAKE_THREAD;
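
The extra `host->ier` test makes the handler act on a status bit only when the corresponding source is actually enabled; a status bit can be latched in hardware even while the driver has the source masked. The pattern, with an illustrative bit value:

    #include <stdio.h>

    #define INT_CARD_INT (1u << 8)  /* illustrative bit position */

    int main(void)
    {
        unsigned int intmask = INT_CARD_INT;  /* pending in hardware  */
        unsigned int ier = 0;                 /* but masked by driver */

        if ((intmask & INT_CARD_INT) && (ier & INT_CARD_INT))
            printf("wake sdio irq thread\n");
        else
            printf("source is masked: ignore the latched bit\n");
        return 0;
    }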

+ 2 - 6
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h

@@ -1014,9 +1014,7 @@
 
 static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
-	u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-	writel(value, reg_addr + reg);
+	writel(value, base + reg);
 }
 
 #define dsaf_write_dev(a, reg, value) \
@@ -1024,9 +1022,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 
 static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
 {
-	u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-	return readl(reg_addr + reg);
+	return readl(base + reg);
}
 
 static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)

+ 2 - 2
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c

@@ -1113,7 +1113,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
 	new_prof.tx_ring_size = tx_size;
 	new_prof.rx_ring_size = rx_size;
-	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
 	if (err)
 		goto out;
 
@@ -1788,7 +1788,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	new_prof.tx_ring_num[TX_XDP] = xdp_count;
 	new_prof.rx_ring_num = channel->rx_count;
 
-	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
 	if (err)
 		goto out;
 

+ 25 - 10
drivers/net/ethernet/mellanox/mlx4/en_netdev.c

@@ -2048,6 +2048,8 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 			if (priv->tx_cq[t] && priv->tx_cq[t][i])
 				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
 		}
+		kfree(priv->tx_ring[t]);
+		kfree(priv->tx_cq[t]);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2190,9 +2192,11 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 				struct mlx4_en_priv *tmp,
-				struct mlx4_en_port_profile *prof)
+				struct mlx4_en_port_profile *prof,
+				bool carry_xdp_prog)
 {
-	int t;
+	struct bpf_prog *xdp_prog;
+	int i, t;
 
 	mlx4_en_copy_priv(tmp, priv, prof);
 
@@ -2206,6 +2210,23 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 		}
 		return -ENOMEM;
 	}
+
+	/* All rx_rings has the same xdp_prog.  Pick the first one. */
+	xdp_prog = rcu_dereference_protected(
+		priv->rx_ring[0]->xdp_prog,
+		lockdep_is_held(&priv->mdev->state_lock));
+
+	if (xdp_prog && carry_xdp_prog) {
+		xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
+		if (IS_ERR(xdp_prog)) {
+			mlx4_en_free_resources(tmp);
+			return PTR_ERR(xdp_prog);
+		}
+		for (i = 0; i < tmp->rx_ring_num; i++)
+			rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
+					   xdp_prog);
+	}
+
 	return 0;
 }
 
@@ -2220,7 +2241,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int t;
 
 	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2254,11 +2274,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	mlx4_en_free_resources(priv);
 	mutex_unlock(&mdev->state_lock);
 
-	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
-		kfree(priv->tx_ring[t]);
-		kfree(priv->tx_cq[t]);
-	}
-
 	free_netdev(dev);
 }
 
@@ -2761,7 +2776,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 		en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
 	}
 
-	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
 	if (err) {
 		if (prog)
 			bpf_prog_sub(prog, priv->rx_ring_num - 1);
@@ -3505,7 +3520,7 @@ int mlx4_en_reset_config(struct net_device *dev,
 	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
 	memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
 
-	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
 	if (err)
 		goto out;
 

+ 4 - 1
drivers/net/ethernet/mellanox/mlx4/en_rx.c

@@ -515,8 +515,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
 		return;
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
-		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+			local_bh_disable();
 			napi_reschedule(&priv->rx_cq[ring]->napi);
+			local_bh_enable();
+		}
 	}
 }
 

+ 2 - 1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

@@ -679,7 +679,8 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 				struct mlx4_en_priv *tmp,
-				struct mlx4_en_port_profile *prof);
+				struct mlx4_en_port_profile *prof,
+				bool carry_xdp_prog);
 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
 				    struct mlx4_en_priv *tmp);
 

+ 3 - 0
drivers/net/hyperv/netvsc.c

@@ -1254,6 +1254,9 @@ void netvsc_channel_cb(void *context)
 	    netvsc_channel_idle(net_device, q_idx))
 		return;
 
+	/* commit_rd_index() -> hv_signal_on_read() needs this. */
+	init_cached_read_index(channel);
+
 	while ((desc = get_next_pkt_raw(channel)) != NULL) {
 		netvsc_process_raw_pkt(device, channel, net_device,
 				       ndev, desc->trans_id, desc);

+ 2 - 2
drivers/net/macvtap.c

@@ -681,7 +681,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 	size_t linear;
 
 	if (q->flags & IFF_VNET_HDR) {
-		vnet_hdr_len = q->vnet_hdr_sz;
+		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
 
 		err = -EINVAL;
 		if (len < vnet_hdr_len)
@@ -820,7 +820,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 
 	if (q->flags & IFF_VNET_HDR) {
 		struct virtio_net_hdr vnet_hdr;
-		vnet_hdr_len = q->vnet_hdr_sz;
+		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
 		if (iov_iter_count(iter) < vnet_hdr_len)
 			return -EINVAL;
 

+ 7 - 0
drivers/net/phy/phy_device.c

@@ -920,6 +920,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 		return -EIO;
 	}
 
+	if (!try_module_get(d->driver->owner)) {
+		dev_err(&dev->dev, "failed to get the device driver module\n");
+		return -EIO;
+	}
+
 	get_device(d);
 
 	/* Assume that if there is no driver, that it doesn't
@@ -977,6 +982,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 error:
 	phy_detach(phydev);
 	put_device(d);
+	module_put(d->driver->owner);
 	if (ndev_owner != bus->owner)
 		module_put(bus->owner);
 	return err;
@@ -1059,6 +1065,7 @@ void phy_detach(struct phy_device *phydev)
 	bus = phydev->mdio.bus;
 
 	put_device(&phydev->mdio.dev);
+	module_put(phydev->mdio.dev.driver->owner);
 	if (ndev_owner != bus->owner)
 		module_put(bus->owner);
 }
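
The hunks above pin the PHY driver module for the lifetime of the attachment and drop exactly one reference on both the error path and the normal detach path. The acquire/release-pairing shape in miniature, with stand-in functions (a real try_module_get() can fail, which the stub below glosses over):

    #include <stdbool.h>
    #include <stdio.h>

    static int refs;

    static bool try_get(void) { refs++; return true; }  /* stand-in, never fails */
    static void put(void)     { refs--; }

    static int attach(bool fail_later)
    {
        if (!try_get())
            return -1;       /* acquire failed: nothing to undo */

        if (fail_later) {
            put();           /* error path releases the reference */
            return -1;
        }
        return 0;            /* detach() releases it later */
    }

    static void detach(void) { put(); }

    int main(void)
    {
        if (attach(false) == 0)
            detach();
        attach(true);
        printf("refs=%d (balanced)\n", refs);
        return 0;
    }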

+ 6 - 4
drivers/net/tun.c

@@ -1207,9 +1207,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	}
 
 	if (tun->flags & IFF_VNET_HDR) {
-		if (len < tun->vnet_hdr_sz)
+		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+		if (len < vnet_hdr_sz)
 			return -EINVAL;
-		len -= tun->vnet_hdr_sz;
+		len -= vnet_hdr_sz;
 
 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
 			return -EFAULT;
@@ -1220,7 +1222,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
 			return -EINVAL;
-		iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
 	}
 
 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1371,7 +1373,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		vlan_hlen = VLAN_HLEN;
 
 	if (tun->flags & IFF_VNET_HDR)
-		vnet_hdr_sz = tun->vnet_hdr_sz;
+		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
 
 	total = skb->len + vlan_hlen + vnet_hdr_sz;
 
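Both the tun and macvtap hunks apply the same hardening: vnet_hdr_sz can be changed by an ioctl on another CPU, so the value is loaded exactly once into a local and that local is used for both the validity check and the arithmetic, instead of re-reading the field and possibly seeing two different sizes. A userspace sketch of the pattern; the macro mimics the kernel's READ_ONCE() (from <linux/compiler.h>) for a plain int:

    #include <stdio.h>

    /* Single volatile load, like the kernel's READ_ONCE() for scalars. */
    #define READ_ONCE(x)  (*(const volatile typeof(x) *)&(x))

    static int vnet_hdr_sz = 12;  /* may be rewritten by another thread */

    static int get_user_len(int len)
    {
        int hdr = READ_ONCE(vnet_hdr_sz);  /* load once */

        if (len < hdr)                     /* check ...                 */
            return -1;
        return len - hdr;                  /* ... and use the same value */
    }

    int main(void)
    {
        printf("%d\n", get_user_len(100));
        return 0;
    }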

Some files were not shown because too many files changed in this diff