
nfsd: merge stable fix into main nfsd branch

J. Bruce Fields, 8 years ago
parent commit 60709c093e
100 changed files with 1007 additions and 842 deletions
  1. MAINTAINERS (+1 -2)
  2. Makefile (+2 -2)
  3. arch/arm64/crypto/aes-modes.S (+42 -46)
  4. arch/powerpc/Kconfig (+1 -1)
  5. arch/powerpc/include/asm/cpu_has_feature.h (+2 -0)
  6. arch/powerpc/include/asm/mmu.h (+2 -0)
  7. arch/powerpc/include/asm/module.h (+0 -4)
  8. arch/powerpc/include/asm/stackprotector.h (+0 -40)
  9. arch/powerpc/kernel/Makefile (+0 -4)
  10. arch/powerpc/kernel/asm-offsets.c (+0 -3)
  11. arch/powerpc/kernel/eeh_driver.c (+1 -1)
  12. arch/powerpc/kernel/entry_32.S (+1 -5)
  13. arch/powerpc/kernel/module_64.c (+0 -8)
  14. arch/powerpc/kernel/process.c (+0 -6)
  15. arch/powerpc/kernel/prom_init.c (+3 -0)
  16. arch/powerpc/mm/pgtable-radix.c (+2 -2)
  17. arch/sparc/include/asm/mmu_context_64.h (+4 -4)
  18. arch/sparc/kernel/irq_64.c (+1 -1)
  19. arch/sparc/kernel/sstate.c (+3 -3)
  20. arch/sparc/kernel/traps_64.c (+73 -0)
  21. arch/x86/events/intel/rapl.c (+26 -34)
  22. arch/x86/events/intel/uncore.c (+91 -141)
  23. arch/x86/include/asm/microcode.h (+1 -0)
  24. arch/x86/kernel/apic/io_apic.c (+2 -0)
  25. arch/x86/kernel/cpu/mcheck/mce.c (+12 -19)
  26. arch/x86/kernel/cpu/microcode/amd.c (+3 -2)
  27. arch/x86/kernel/cpu/microcode/core.c (+17 -5)
  28. arch/x86/kernel/cpu/microcode/intel.c (+1 -8)
  29. arch/x86/kernel/fpu/core.c (+3 -1)
  30. arch/x86/kernel/hpet.c (+1 -0)
  31. arch/x86/kvm/x86.c (+1 -0)
  32. arch/x86/platform/efi/efi_64.c (+16 -0)
  33. arch/xtensa/kernel/setup.c (+1 -1)
  34. crypto/algapi.c (+1 -0)
  35. drivers/ata/libata-core.c (+4 -2)
  36. drivers/ata/sata_mv.c (+3 -0)
  37. drivers/base/firmware_class.c (+1 -4)
  38. drivers/base/memory.c (+6 -6)
  39. drivers/bcma/bcma_private.h (+3 -0)
  40. drivers/bcma/driver_chipcommon.c (+3 -8)
  41. drivers/bcma/driver_mips.c (+3 -0)
  42. drivers/dma/cppi41.c (+46 -23)
  43. drivers/dma/pl330.c (+6 -13)
  44. drivers/firmware/efi/libstub/fdt.c (+3 -11)
  45. drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c (+3 -1)
  46. drivers/gpu/drm/drm_atomic.c (+8 -5)
  47. drivers/gpu/drm/drm_atomic_helper.c (+0 -9)
  48. drivers/gpu/drm/drm_connector.c (+18 -5)
  49. drivers/gpu/drm/drm_drv.c (+4 -0)
  50. drivers/gpu/drm/i915/i915_drv.h (+4 -12)
  51. drivers/gpu/drm/i915/intel_atomic_plane.c (+20 -0)
  52. drivers/gpu/drm/i915/intel_display.c (+42 -83)
  53. drivers/gpu/drm/i915/intel_drv.h (+7 -2)
  54. drivers/gpu/drm/i915/intel_fbc.c (+20 -32)
  55. drivers/gpu/drm/i915/intel_fbdev.c (+2 -2)
  56. drivers/gpu/drm/i915/intel_sprite.c (+4 -4)
  57. drivers/gpu/drm/nouveau/dispnv04/hw.c (+2 -1)
  58. drivers/gpu/drm/nouveau/nouveau_fence.h (+1 -0)
  59. drivers/gpu/drm/nouveau/nouveau_led.h (+1 -1)
  60. drivers/gpu/drm/nouveau/nouveau_usif.c (+2 -1)
  61. drivers/gpu/drm/nouveau/nv50_display.c (+6 -0)
  62. drivers/gpu/drm/nouveau/nv84_fence.c (+6 -0)
  63. drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c (+1 -1)
  64. drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c (+0 -2)
  65. drivers/gpu/drm/radeon/radeon_drv.c (+2 -1)
  66. drivers/gpu/drm/radeon/radeon_gem.c (+2 -2)
  67. drivers/hid/hid-cp2112.c (+12 -16)
  68. drivers/hid/hid-ids.h (+3 -0)
  69. drivers/hid/hid-lg.c (+1 -1)
  70. drivers/hid/usbhid/hid-quirks.c (+1 -0)
  71. drivers/hid/wacom_wac.c (+15 -13)
  72. drivers/hv/ring_buffer.c (+1 -0)
  73. drivers/iio/adc/palmas_gpadc.c (+2 -2)
  74. drivers/iio/health/afe4403.c (+2 -2)
  75. drivers/iio/health/afe4404.c (+2 -2)
  76. drivers/iio/health/max30100.c (+1 -1)
  77. drivers/iio/humidity/dht11.c (+4 -2)
  78. drivers/input/rmi4/rmi_driver.c (+2 -2)
  79. drivers/input/touchscreen/wm97xx-core.c (+1 -1)
  80. drivers/mmc/host/sdhci.c (+2 -1)
  81. drivers/net/ethernet/adaptec/starfire.c (+43 -2)
  82. drivers/net/ethernet/cadence/macb.c (+130 -58)
  83. drivers/net/ethernet/cadence/macb.h (+17 -3)
  84. drivers/net/ethernet/cavium/thunder/thunder_xcv.c (+1 -2)
  85. drivers/net/ethernet/emulex/benet/be_main.c (+28 -5)
  86. drivers/net/ethernet/freescale/gianfar.c (+2 -2)
  87. drivers/net/ethernet/mellanox/mlx4/catas.c (+1 -1)
  88. drivers/net/ethernet/mellanox/mlx4/intf.c (+12 -0)
  89. drivers/net/ethernet/mellanox/mlx4/mlx4.h (+1 -0)
  90. drivers/net/ethernet/mellanox/mlx5/core/cmd.c (+1 -1)
  91. drivers/net/ethernet/mellanox/mlx5/core/en.h (+4 -3)
  92. drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c (+8 -3)
  93. drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c (+26 -15)
  94. drivers/net/ethernet/mellanox/mlx5/core/en_fs.c (+1 -1)
  95. drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c (+1 -1)
  96. drivers/net/ethernet/mellanox/mlx5/core/en_main.c (+101 -101)
  97. drivers/net/ethernet/mellanox/mlx5/core/en_tc.c (+6 -7)
  98. drivers/net/ethernet/mellanox/mlx5/core/eswitch.c (+5 -5)
  99. drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c (+22 -14)
  100. drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c (+1 -1)

+ 1 - 2
MAINTAINERS

@@ -10195,7 +10195,6 @@ F:	drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:	QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
-L:	ath9k-devel@lists.ath9k.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 S:	Supported
 F:	drivers/net/wireless/ath/ath9k/
@@ -13066,7 +13065,7 @@ F:	drivers/input/serio/userio.c
 F:	include/uapi/linux/userio.h
 
 VIRTIO CONSOLE DRIVER
-M:	Amit Shah <amit.shah@redhat.com>
+M:	Amit Shah <amit@kernel.org>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/char/virtio_console.c

+ 2 - 2
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -797,7 +797,7 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif

+ 42 - 46
arch/arm64/crypto/aes-modes.S

@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 	cbz		w6, .Lcbcencloop
 
 	ld1		{v0.16b}, [x5]			/* get iv */
-	enc_prepare	w3, x2, x5
+	enc_prepare	w3, x2, x6
 
 .Lcbcencloop:
 	ld1		{v1.16b}, [x1], #16		/* get next pt block */
 	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
-	encrypt_block	v0, w3, x2, x5, w6
+	encrypt_block	v0, w3, x2, x6, w7
 	st1		{v0.16b}, [x0], #16
 	subs		w4, w4, #1
 	bne		.Lcbcencloop
+	st1		{v0.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_encrypt)
 
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	cbz		w6, .LcbcdecloopNx
 
 	ld1		{v7.16b}, [x5]			/* get iv */
-	dec_prepare	w3, x2, x5
+	dec_prepare	w3, x2, x6
 
 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 	ld1		{v1.16b}, [x1], #16		/* get next ct block */
 	mov		v0.16b, v1.16b			/* ...and copy to v0 */
-	decrypt_block	v0, w3, x2, x5, w6
+	decrypt_block	v0, w3, x2, x6, w7
 	eor		v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
 	mov		v7.16b, v1.16b			/* ct is next iv */
 	st1		{v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	bne		.Lcbcdecloop
 .Lcbcdecout:
 	FRAME_POP
+	st1		{v7.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_decrypt)
 
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
 
 AES_ENTRY(aes_ctr_encrypt)
 	FRAME_PUSH
-	cbnz		w6, .Lctrfirst		/* 1st time around? */
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
-#if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
-	bcs		.Lctrinc
-	add		x5, x5, #1		/* increment BE ctr */
-	b		.LctrincNx
-#else
-	b		.Lctrinc
-#endif
-.Lctrfirst:
+	cbz		w6, .Lctrnotfirst	/* 1st time around? */
 	enc_prepare	w3, x2, x6
 	ld1		{v4.16b}, [x5]
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
+
+.Lctrnotfirst:
+	umov		x8, v4.d[1]		/* keep swabbed ctr in reg */
+	rev		x8, x8
 #if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
+	cmn		w8, w4			/* 32 bit overflow? */
 	bcs		.Lctrloop
 .LctrloopNx:
 	subs		w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
#if INTERLEAVE == 2
 	mov		v0.8b, v4.8b
 	mov		v1.8b, v4.8b
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v0.d[1], x7
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v1.d[1], x7
 	ld1		{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
 	do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
 	st1		{v0.16b-v1.16b}, [x0], #32
 #else
 	ldr		q8, =0x30000000200000001	/* addends 1,2,3[,0] */
-	dup		v7.4s, w5
+	dup		v7.4s, w8
 	mov		v0.16b, v4.16b
 	add		v7.4s, v7.4s, v8.4s
 	mov		v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
 	eor		v2.16b, v7.16b, v2.16b
 	eor		v3.16b, v5.16b, v3.16b
 	st1		{v0.16b-v3.16b}, [x0], #64
-	add		x5, x5, #INTERLEAVE
+	add		x8, x8, #INTERLEAVE
 #endif
-	cbz		w4, .LctroutNx
-.LctrincNx:
-	rev		x7, x5
+	rev		x7, x8
 	ins		v4.d[1], x7
+	cbz		w4, .Lctrout
 	b		.LctrloopNx
-.LctroutNx:
-	sub		x5, x5, #1
-	rev		x7, x5
-	ins		v4.d[1], x7
-	b		.Lctrout
 .Lctr1x:
 	adds		w4, w4, #INTERLEAVE
 	beq		.Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
 	mov		v0.16b, v4.16b
 	encrypt_block	v0, w3, x2, x6, w7
+
+	adds		x8, x8, #1		/* increment BE ctr */
+	rev		x7, x8
+	ins		v4.d[1], x7
+	bcs		.Lctrcarry		/* overflow? */
+
+.Lctrcarrydone:
 	subs		w4, w4, #1
 	bmi		.Lctrhalfblock		/* blocks < 0 means 1/2 block */
 	ld1		{v3.16b}, [x1], #16
 	eor		v3.16b, v0.16b, v3.16b
 	st1		{v3.16b}, [x0], #16
-	beq		.Lctrout
-.Lctrinc:
-	adds		x5, x5, #1		/* increment BE ctr */
-	rev		x7, x5
-	ins		v4.d[1], x7
-	bcc		.Lctrloop		/* no overflow? */
-	umov		x7, v4.d[0]		/* load upper word of ctr  */
-	rev		x7, x7			/* ... to handle the carry */
-	add		x7, x7, #1
-	rev		x7, x7
-	ins		v4.d[0], x7
-	b		.Lctrloop
+	bne		.Lctrloop
+
+.Lctrout:
+	st1		{v4.16b}, [x5]		/* return next CTR value */
+	FRAME_POP
+	ret
+
 .Lctrhalfblock:
 	ld1		{v3.8b}, [x1]
 	eor		v3.8b, v0.8b, v3.8b
 	st1		{v3.8b}, [x0]
-.Lctrout:
 	FRAME_POP
 	ret
+
+.Lctrcarry:
+	umov		x7, v4.d[0]		/* load upper word of ctr  */
+	rev		x7, x7			/* ... to handle the carry */
+	add		x7, x7, #1
+	rev		x7, x7
+	ins		v4.d[0], x7
+	b		.Lctrcarrydone
AES_ENDPROC(aes_ctr_encrypt)
 	.ltorg
 

+ 1 - 1
arch/powerpc/Kconfig

@@ -164,7 +164,6 @@ config PPC
 	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
-	select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
 	bool "Build a relocatable kernel"
 	depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 	select NONSTATIC_KERNEL
+	select MODULE_REL_CRCS if MODVERSIONS
 	help
 	  This builds a kernel image that is capable of running at the
 	  location the kernel is loaded at. For ppc32, there is no any

+ 2 - 0
arch/powerpc/include/asm/cpu_has_feature.h

@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {

+ 2 - 0
arch/powerpc/include/asm/mmu.h

@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {

+ 0 - 4
arch/powerpc/include/asm/module.h

@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif	/* _ASM_POWERPC_MODULE_H */

+ 0 - 40
arch/powerpc/include/asm/stackprotector.h

@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function.  The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-	unsigned long canary;
-
-	/* Try to get a semi random initial value. */
-	get_random_bytes(&canary, sizeof(canary));
-	canary ^= mftb();
-	canary ^= LINUX_VERSION_CODE;
-
-	current->stack_canary = canary;
-	__stack_chk_guard = current->stack_canary;
-}
-
-#endif	/* _ASM_STACKPROTECTOR_H */

+ 0 - 4
arch/powerpc/kernel/Makefile

@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)

+ 0 - 3
arch/powerpc/kernel/asm-offsets.c

@@ -91,9 +91,6 @@ int main(void)
 	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE

+ 1 - 1
arch/powerpc/kernel/eeh_driver.c

@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 	struct eeh_pe *pe = (struct eeh_pe *)data;
-	bool *clear_sw_state = flag;
+	bool clear_sw_state = *(bool *)flag;
 	int i, rc = 1;
 
 	for (i = 0; rc && i < 3; i++)

+ 1 - 5
arch/powerpc/kernel/entry_32.S

@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	lwz	r0,TSK_STACK_CANARY(r2)
-	lis	r4,__stack_chk_guard@ha
-	stw	r0,__stack_chk_guard@l(r4)
-#endif
+
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
 	/* r3-r12 are destroyed -- Cort */

+ 0 - 8
arch/powerpc/kernel/module_64.c

@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
 	for (end = (void *)vers + size; vers < end; vers++)
 		if (vers->name[0] == '.') {
 			memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-			/* The TOC symbol has no CRC computed. To avoid CRC
-			 * check failing, we must force it to the expected
-			 * value (see CRC check in module.c).
-			 */
-			if (!strcmp(vers->name, "TOC."))
-				vers->crc = -(unsigned long)reloc_start;
-#endif
 		}
 }
 

+ 0 - 6
arch/powerpc/kernel/process.c

@@ -64,12 +64,6 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)

+ 3 - 0
arch/powerpc/kernel/prom_init.c

@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+	if (!PHANDLE_VALID(cpu_pkg))
+		return;
+
 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 	prom.cpu = be32_to_cpu(rval);
 

+ 2 - 2
arch/powerpc/mm/pgtable-radix.c

@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		if (!pmdp)
 			return -ENOMEM;
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		}
 		pmdp = pmd_offset(pudp, ea);
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		if (!pmd_present(*pmdp)) {

+ 4 - 4
arch/sparc/include/asm/mmu_context_64.h

@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd),
-			     &mm->context.tsb_block[0],
+			     &mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-			     (mm->context.tsb_block[1].tsb ?
-			      &mm->context.tsb_block[1] :
+			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+			      &mm->context.tsb_block[MM_TSB_HUGE] :
 			      NULL)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }
 
 void tsb_grow(struct mm_struct *mm,

+ 1 - 1
arch/sparc/kernel/irq_64.c

@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 	unsigned long order = get_order(size);
 	unsigned long p;
 
-	p = __get_free_pages(GFP_KERNEL, order);
+	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate queue.\n");
 		prom_halt();

+ 3 - 3
arch/sparc/kernel/sstate.c

@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
 	"Linux powering off";
 	"Linux powering off";
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 	"Linux rebooting";
 	"Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
-	"Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+	"Linux panicking";
 
 
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 {
 {
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
 
 
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
 {
-	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
 
 
 	return NOTIFY_DONE;
 	return NOTIFY_DONE;
 }
 }

+ 73 - 0
arch/sparc/kernel/traps_64.c

@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
 	atomic_inc(&sun4v_resum_oflow_cnt);
 }
 
+/* Given a set of registers, get the virtual addressi that was being accessed
+ * by the faulting instructions at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+	unsigned int insn;
+
+	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+		return compute_effective_address(regs, insn,
+						 (insn >> 25) & 0x1f);
+	}
+	return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+				  struct sun4v_error_entry *ent) {
+
+	unsigned int attrs = ent->err_attrs;
+
+	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+		unsigned long addr = ent->err_raddr;
+		siginfo_t info;
+
+		if (addr == ~(u64)0) {
+			/* This seems highly unlikely to ever occur */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+		} else {
+			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+							      PAGE_SIZE);
+
+			/* Break the unfortunate news. */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+				 addr);
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR:   Claiming %lu ages.\n",
+				 page_cnt);
+
+			while (page_cnt-- > 0) {
+				if (pfn_valid(addr >> PAGE_SHIFT))
+					get_page(pfn_to_page(addr >> PAGE_SHIFT));
+				addr += PAGE_SIZE;
+			}
+		}
+		info.si_signo = SIGKILL;
+		info.si_errno = 0;
+		info.si_trapno = 0;
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+	if (attrs & SUN4V_ERR_ATTRS_PIO) {
+		siginfo_t info;
+
+		info.si_signo = SIGBUS;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+
+	/* Default to doing nothing */
+	return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 
 	put_cpu();
 
+	if (!(regs->tstate & TSTATE_PRIV) &&
+	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+		/* DON'T PANIC: This userspace error was handled. */
+		return;
+	}
+
 #ifdef CONFIG_PCI
 	/* Check for the special PCI poke sequence. */
 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {

+ 26 - 34
arch/x86/events/intel/rapl.c

@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-	return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
 	/* must be done before validate_group */
 	pmu = cpu_to_rapl_pmu(event->cpu);
+	if (!pmu)
+		return -EINVAL;
 	event->cpu = pmu->cpu;
 	event->pmu_private = pmu;
 	event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
+	if (!pmu) {
+		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!pmu)
+			return -ENOMEM;
+
+		raw_spin_lock_init(&pmu->lock);
+		INIT_LIST_HEAD(&pmu->active_list);
+		pmu->pmu = &rapl_pmus->pmu;
+		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(pmu);
+
+		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+	}
+
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
 	return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-	if (pmu)
-		return 0;
-
-	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-	if (!pmu)
-		return -ENOMEM;
-
-	raw_spin_lock_init(&pmu->lock);
-	INIT_LIST_HEAD(&pmu->active_list);
-	pmu->pmu = &rapl_pmus->pmu;
-	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-	pmu->cpu = -1;
-	rapl_hrtimer_init(pmu);
-	rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-	return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
 	/*
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
-
-	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-				rapl_cpu_prepare, NULL);
-	if (ret)
-		goto out;
-
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
 				"perf/x86/rapl:online",
 				rapl_cpu_online, rapl_cpu_offline);
 	if (ret)
-		goto out1;
+		goto out;
 
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out2;
+		goto out1;
 
 	rapl_advertise();
 	return 0;
 
-out2:
-	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
 }

+ 91 - 141
arch/x86/events/intel/uncore.c

@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	return pmu->boxes[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 	pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-	struct intel_uncore_pmu *pmu = type->pmus;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	if (pmu) {
-		pkg = topology_physical_package_id(cpu);
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box)
-				uncore_box_exit(box);
-		}
-	}
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-	struct intel_uncore_type **types;
-
-	for (types = uncore_msr_uncores; *types; types++)
-		__uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
 	int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
 	}
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
-	return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg, ncpus = 1;
-
-	if (first_init) {
-		/*
-		 * On init we get the number of online cpus in the package
-		 * and set refcount for all of them.
-		 */
-		ncpus = cpumask_weight(topology_core_cpumask(cpu));
-	}
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (!box)
-				continue;
-			/* The first cpu on a package activates the box */
-			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-				uncore_box_init(box);
-		}
-	}
-
-	return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
-				continue;
-			/* First cpu of a package allocates the box */
-			box = uncore_alloc_box(type, cpu_to_node(cpu));
-			if (!box)
-				return -ENOMEM;
-			box->pmu = pmu;
-			box->pkgid = pkg;
-			pmu->boxes[pkg] = box;
-		}
-	}
-	return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 				   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, pkg, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return 0;
-
+		goto unref;
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+	/* Clear the references */
+	pkg = topology_logical_package_id(cpu);
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
 	return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+			 unsigned int pkg, unsigned int cpu)
+{
+	struct intel_uncore_box *box, *tmp;
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu;
+	LIST_HEAD(allocated);
+	int i;
+
+	/* Try to allocate all required boxes */
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			if (pmu->boxes[pkg])
+				continue;
+			box = uncore_alloc_box(type, cpu_to_node(cpu));
+			if (!box)
+				goto cleanup;
+			box->pmu = pmu;
+			box->pkgid = pkg;
+			list_add(&box->active_list, &allocated);
+		}
+	}
+	/* Install them in the pmus */
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		box->pmu->boxes[pkg] = box;
+	}
+	return 0;
+
+cleanup:
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		kfree(box);
+	}
+	return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, ret, pkg, target;
+
+	pkg = topology_logical_package_id(cpu);
+	ret = allocate_boxes(types, pkg, cpu);
+	if (ret)
+		return ret;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (!box && atomic_inc_return(&box->refcnt) == 1)
+				uncore_box_init(box);
+		}
+	}
 
 	/*
 	 * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;
 
-	/*
-	 * Install callbacks. Core will call them for each online cpu.
-	 *
-	 * The first online cpu of each package allocates and takes
-	 * the refcounts for all other online cpus in that package.
-	 * If msrs are not enabled no allocation is required and
-	 * uncore_cpu_prepare() is not called for each online cpu.
-	 */
-	if (!cret) {
-	       ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-				       "perf/x86/intel/uncore:prepare",
-				       uncore_cpu_prepare, NULL);
-		if (ret)
-			goto err;
-	} else {
-		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-					  "perf/x86/intel/uncore:prepare",
-					  uncore_cpu_prepare, NULL);
-	}
-	first_init = 1;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-			  "perf/x86/uncore:starting",
-			  uncore_cpu_starting, uncore_cpu_dying);
-	first_init = 0;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-			  "perf/x86/uncore:online",
-			  uncore_event_cpu_online, uncore_event_cpu_offline);
+	/* Install hotplug callbacks to setup the targets for each package */
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				"perf/x86/intel/uncore:online",
+				uncore_event_cpu_online,
+				uncore_event_cpu_offline);
+	if (ret)
+		goto err;
 	return 0;
 
 err:
-	/* Undo box->init_box() */
-	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 	return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 }

+ 1 - 0
arch/x86/include/asm/microcode.h

@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void)			{ return 0; };
 static inline void __init load_ucode_bsp(void)			{ }

+ 2 - 0
arch/x86/kernel/apic/io_apic.c

@@ -2117,6 +2117,7 @@ static inline void __init check_timer(void)
 			if (idx != -1 && irq_trigger(idx))
 				unmask_ioapic_irq(irq_get_chip_data(0));
 		}
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		if (timer_irq_works()) {
 			if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@
 		 * legacy devices should be connected to IO APIC #0
 		 */
 		replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
 		legacy_pic->unmask(0);
 		if (timer_irq_works()) {

+ 12 - 19
arch/x86/kernel/cpu/mcheck/mce.c

@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 
 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
 
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
 	unsigned long when = jiffies + interval;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	if (timer_pending(t)) {
-		if (time_before(when, t->expires))
-			mod_timer(t, when);
-	} else {
-		t->expires = round_jiffies(when);
-		add_timer_on(t, smp_processor_id());
-	}
+	if (!timer_pending(t) || time_before(when, t->expires))
+		mod_timer(t, round_jiffies(when));
 
 	local_irq_restore(flags);
 }
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
 
 done:
 	__this_cpu_write(mce_next_interval, iv);
-	__restart_timer(t, iv);
+	__start_timer(t, iv);
 }
 
 /*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
 	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
-	__restart_timer(t, interval);
+	__start_timer(t, interval);
 
 	if (interval < iv)
 		__this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
 	}
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
 	unsigned long iv = check_interval * HZ;
 
 	if (mca_cfg.ignore_ce || !iv)
 		return;
 
-	per_cpu(mce_next_interval, cpu) = iv;
-
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, cpu);
+	this_cpu_write(mce_next_interval, iv);
+	__start_timer(t, iv);
 }
 
 static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
 	unsigned int cpu = smp_processor_id();
 
 	setup_pinned_timer(t, mce_timer_fn, cpu);
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
 
 static int mce_cpu_online(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	int ret;
 
 	mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
 		return ret;
 	}
 	mce_reenable_cpu();
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 	return 0;
 }
 
 static int mce_cpu_pre_down(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 
 	mce_disable_cpu();
 	del_timer_sync(t);

+ 3 - 2
arch/x86/kernel/cpu/microcode/amd.c

@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
 reget:
 		if (!get_builtin_microcode(&cp, family)) {
 #ifdef CONFIG_BLK_DEV_INITRD
-			cp = find_cpio_data(ucode_path, (void *)initrd_start,
-					    initrd_end - initrd_start, NULL);
+			if (!initrd_gone)
+				cp = find_cpio_data(ucode_path, (void *)initrd_start,
+						    initrd_end - initrd_start, NULL);
 #endif
 			if (!(cp.data && cp.size)) {
 				/*

+ 17 - 5
arch/x86/kernel/cpu/microcode/core.c

@@ -46,6 +46,8 @@
 static struct microcode_ops	*microcode_ops;
 static bool dis_ucode_ldr = true;
 
+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);
 
 /*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
+	int ret = -EINVAL;
 
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
 		if (c->x86 >= 6)
-			return save_microcode_in_initrd_intel();
+			ret = save_microcode_in_initrd_intel();
 		break;
 	case X86_VENDOR_AMD:
 		if (c->x86 >= 0x10)
-			return save_microcode_in_initrd_amd(c->x86);
+			ret = save_microcode_in_initrd_amd(c->x86);
 		break;
 	default:
 		break;
 	}
 
-	return -EINVAL;
+	initrd_gone = true;
+
+	return ret;
 }
 
 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 	 * has the virtual address of the beginning of the initrd. It also
 	 * possibly relocates the ramdisk. In either case, initrd_start contains
 	 * the updated address so use that instead.
+	 *
+	 * initrd_gone is for the hotplug case where we've thrown out initrd
+	 * already.
 	 */
-	if (!use_pa && initrd_start)
-		start = initrd_start;
+	if (!use_pa) {
+		if (initrd_gone)
+			return (struct cpio_data){ NULL, 0, "" };
+		if (initrd_start)
+			start = initrd_start;
+	}
 
 	return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */

+ 1 - 8
arch/x86/kernel/cpu/microcode/intel.c

@@ -41,7 +41,7 @@
 
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
 	struct ucode_cpu_info uci;
 	struct cpio_data cp;
 
-	/*
-	 * AP loading didn't find any microcode patch, no need to save anything.
-	 */
-	if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-		return 0;
-
 	if (!load_builtin_intel_microcode(&cp))
 		cp = find_microcode_in_initrd(ucode_path, false);
 
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
 	return 0;
 }
 
-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */

+ 3 - 1
arch/x86/kernel/fpu/core.c

@@ -9,6 +9,7 @@
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
 	 * it will #GP. Make sure it is replaced after the memset().
 	 */
 	if (static_cpu_has(X86_FEATURE_XSAVES))
-		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+					       xfeatures_mask;
 
 	if (static_cpu_has(X86_FEATURE_FXSR))
 		fpstate_init_fxstate(&state->fxsave);

+ 1 - 0
arch/x86/kernel/hpet.c

@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 	} else {
 		struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 
+		irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
 		disable_irq(hdev->irq);
 		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));

+ 1 - 0
arch/x86/kvm/x86.c

@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 
 	/* Set XSTATE_BV */
+	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
 	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 
 	/*

+ 16 - 0
arch/x86/platform/efi/efi_64.c

@@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
 	efi_scratch.use_pgd = true;
 
+	/*
+	 * Certain firmware versions are way too sentimential and still believe
+	 * they are exclusive and unquestionable owners of the first physical page,
+	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+	 * (but then write-access it later during SetVirtualAddressMap()).
+	 *
+	 * Create a 1:1 mapping for this page, to avoid triple faults during early
+	 * boot with such firmware. We are free to hand this page to the BIOS,
+	 * as trim_bios_range() will reserve the first page and isolate it away
+	 * from memory allocators anyway.
+	 */
+	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+		pr_err("Failed to create 1:1 mapping for the first page!\n");
+		return 1;
+	}
+
 	/*
 	 * When making calls to the firmware everything needs to be 1:1
 	 * mapped and addressable with 32-bit pointers. Map the kernel

+ 1 - 1
arch/xtensa/kernel/setup.c

@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
 
 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
 	local_irq_disable();
 	/*
 	 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must

+ 1 - 0
crypto/algapi.c

@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
 	struct crypto_larval *larval;
 	int err;
 
+	alg->cra_flags &= ~CRYPTO_ALG_DEAD;
 	err = crypto_check_alg(alg);
 	if (err)
 		return err;

+ 4 - 2
drivers/ata/libata-core.c

@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 
 		if (qc->err_mask & ~AC_ERR_OTHER)
 			qc->err_mask &= ~AC_ERR_OTHER;
+	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+		qc->result_tf.command |= ATA_SENSE;
 	}
 
 	/* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
 
 	/*
-	 * Device times out with higher max sects.
+	 * These devices time out with higher max sects.
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
 	 */
-	{ "LITEON CX1-JB256-HP", NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
 
 	/* Devices we expect to fail diagnostics */
 

+ 3 - 0
drivers/ata/sata_mv.c

@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
 	host->iomap = NULL;
 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
 				   resource_size(res));
+	if (!hpriv->base)
+		return -ENOMEM;
+
 	hpriv->base -= SATAHC0_REG_BASE;
 
 	hpriv->clk = clk_get(&pdev->dev, NULL);

+ 1 - 4
drivers/base/firmware_class.c

@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 	struct firmware_buf *buf = fw_priv->buf;
 
 	__fw_load_abort(buf);
-
-	/* avoid user action after loading abort */
-	fw_priv->buf = NULL;
 }
 
 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 
 	mutex_lock(&fw_lock);
 	fw_buf = fw_priv->buf;
-	if (!fw_buf)
+	if (fw_state_is_aborted(&fw_buf->fw_st))
 		goto out;
 
 	switch (loading) {

+ 6 - 6
drivers/base/memory.c

@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
 	struct memory_block *mem = to_memory_block(dev);
 	unsigned long start_pfn, end_pfn;
+	unsigned long valid_start, valid_end, valid_pages;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-	struct page *first_page;
 	struct zone *zone;
 	int zone_shift = 0;
 
 	start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	end_pfn = start_pfn + nr_pages;
-	first_page = pfn_to_page(start_pfn);
 
 	/* The block contains more than one zone can not be offlined. */
-	if (!test_pages_in_a_zone(start_pfn, end_pfn))
+	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
 		return sprintf(buf, "none\n");
 
-	zone = page_zone(first_page);
+	zone = page_zone(pfn_to_page(valid_start));
+	valid_pages = valid_end - valid_start;
 
 	/* MMOP_ONLINE_KEEP */
 	sprintf(buf, "%s", zone->name);
 
 	/* MMOP_ONLINE_KERNEL */
-	zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+	zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
 	}
 
 	/* MMOP_ONLINE_MOVABLE */
-	zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+	zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
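
The show_valid_zones() change follows from test_pages_in_a_zone() now reporting the valid sub-range it actually found: blocks can contain holes, so the zone-shift checks must be sized on valid_start/valid_end rather than the raw block bounds. A toy model of an out-parameter predicate of that shape (all names invented):

	#include <stdbool.h>
	#include <stdio.h>

	/* Besides its yes/no answer, the predicate narrows the query to the
	 * valid sub-range it inspected, which the caller must operate on. */
	static bool pages_in_one_zone(unsigned long start, unsigned long end,
				      unsigned long *valid_start,
				      unsigned long *valid_end)
	{
		if (end - start <= 2)
			return false;
		*valid_start = start + 1;   /* pretend the edges are holes */
		*valid_end = end - 1;
		return true;
	}

	int main(void)
	{
		unsigned long vs, ve;

		if (pages_in_one_zone(0, 512, &vs, &ve))
			printf("operate on [%lu, %lu), %lu pages\n", vs, ve, ve - vs);
		return 0;
	}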

+ 3 - 0
drivers/bcma/bcma_private.h

@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);

+ 3 - 8
drivers/bcma/driver_chipcommon.c

@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
 					 u32 mask, u32 value)
 {
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
 	if (cc->capabilities & BCMA_CC_CAP_PMU)
 		bcma_pmu_early_init(cc);
 
-	if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-		bcma_chipco_serial_init(cc);
-
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		bcma_core_chipcommon_flash_detect(cc);
 
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
 	return res;
 }
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
 	unsigned int irq;
 	u32 baud_base;
 	u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 		ports[i].baud_base = baud_base;
 		ports[i].reg_shift = 0;
 	}
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */

+ 3 - 0
drivers/bcma/driver_mips.c

@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+	struct bcma_bus *bus = mcore->core->bus;
+
 	if (mcore->early_setup_done)
 		return;
 
+	bcma_chipco_serial_init(&bus->drv_cc);
 	bcma_core_mips_nvram_init(mcore);
 
 	mcore->early_setup_done = true;
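
The bcma changes above move the serial setup out of the common early init and under CONFIG_BCMA_DRIVER_MIPS, matching its only user. The shape of the move, sketched with illustrative names:

	/* One Kconfig symbol fences the declaration and the definition, and
	 * the only caller now lives in the MIPS core file, which the Makefile
	 * builds only when that symbol is set, so non-MIPS builds carry
	 * neither the code nor an unresolved reference. */

	#ifdef CONFIG_BCMA_DRIVER_MIPS
	void chipco_serial_init(void);   /* was static in driver_chipcommon.c */

	void chipco_serial_init(void)
	{
		/* ... read IRQ and baud_base from ChipCommon, fill UART table ... */
	}
	#endif /* CONFIG_BCMA_DRIVER_MIPS */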

+ 46 - 23
drivers/dma/cppi41.c

@@ -153,6 +153,8 @@ struct cppi41_dd {
 
 	/* context for suspend/resume */
 	unsigned int dma_tdfdq;
+
+	bool is_suspended;
 };
 
 #define FIST_COMPLETION_QUEUE	93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
 	BUG_ON(desc_num >= ALLOC_DECS_NUM);
 	c = cdd->chan_busy[desc_num];
 	cdd->chan_busy[desc_num] = NULL;
+
+	/* Usecount for chan_busy[], paired with push_desc_queue() */
+	pm_runtime_put(cdd->ddev.dev);
+
 	return c;
 }
 
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
 		while (val) {
 			u32 desc, len;
-			int error;
 
-			error = pm_runtime_get(cdd->ddev.dev);
-			if (error < 0)
-				dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-					__func__, error);
+			/*
+			 * This should never trigger, see the comments in
+			 * push_desc_queue()
+			 */
+			WARN_ON(cdd->is_suspended);
 
 			q_num = __fls(val);
 			val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 			c->residue = pd_trans_len(c->desc->pd6) - len;
 			dma_cookie_complete(&c->txd);
 			dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
-			pm_runtime_mark_last_busy(cdd->ddev.dev);
-			pm_runtime_put_autosuspend(cdd->ddev.dev);
 		}
 	}
 	return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
 	 */
 	__iowmb();
 
+	/*
+	 * DMA transfers can take at least 200ms to complete with USB mass
+	 * storage connected. To prevent autosuspend timeouts, we must use
+	 * pm_runtime_get/put() when chan_busy[] is modified. This will get
+	 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+	 * outcome of the transfer.
+	 */
+	pm_runtime_get(cdd->ddev.dev);
+
 	desc_phys = lower_32_bits(c->desc_phys);
 	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
 	WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
 	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
 }
 
-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * getting called out of order. We have both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
 {
-	struct cppi41_dd *cdd = c->cdd;
-	unsigned long flags;
+	struct cppi41_channel *c, *_c;
 
-	spin_lock_irqsave(&cdd->lock, flags);
-	list_add_tail(&c->node, &cdd->pending);
-	spin_unlock_irqrestore(&cdd->lock, flags);
+	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+		push_desc_queue(c);
+		list_del(&c->node);
+	}
 }
 
 static void cppi41_dma_issue_pending(struct dma_chan *chan)
 {
 	struct cppi41_channel *c = to_cpp41_chan(chan);
 	struct cppi41_dd *cdd = c->cdd;
+	unsigned long flags;
 	int error;
 
 	error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
 		return;
 	}
 
-	if (likely(pm_runtime_active(cdd->ddev.dev)))
-		push_desc_queue(c);
-	else
-		pending_desc(c);
+	spin_lock_irqsave(&cdd->lock, flags);
+	list_add_tail(&c->node, &cdd->pending);
+	if (!cdd->is_suspended)
+		cppi41_run_queue(cdd);
+	spin_unlock_irqrestore(&cdd->lock, flags);
 
 	pm_runtime_mark_last_busy(cdd->ddev.dev);
 	pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 	WARN_ON(!cdd->chan_busy[desc_num]);
 	cdd->chan_busy[desc_num] = NULL;
 
+	/* Usecount for chan_busy[], paired with push_desc_queue() */
+	pm_runtime_put(cdd->ddev.dev);
+
 	return 0;
 }
 
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 {
 	struct cppi41_dd *cdd = dev_get_drvdata(dev);
+	unsigned long flags;
 
+	spin_lock_irqsave(&cdd->lock, flags);
+	cdd->is_suspended = true;
 	WARN_ON(!list_empty(&cdd->pending));
+	spin_unlock_irqrestore(&cdd->lock, flags);
 
 	return 0;
 }
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
 {
 	struct cppi41_dd *cdd = dev_get_drvdata(dev);
-	struct cppi41_channel *c, *_c;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cdd->lock, flags);
-	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
-		push_desc_queue(c);
-		list_del(&c->node);
-	}
+	cdd->is_suspended = false;
+	cppi41_run_queue(cdd);
 	spin_unlock_irqrestore(&cdd->lock, flags);
 
 	return 0;
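 }

Two cooperating ideas in the cppi41 patch: every descriptor sitting in chan_busy[] holds one runtime-PM reference (taken in push_desc_queue(), dropped in desc_to_chan() or cppi41_stop_chan()), and submissions are queued under cdd->lock and only flushed to hardware while the device is not suspended, with runtime resume replaying the backlog. A compact pthread model of the gating half (a sketch of the scheme, not the driver):

	#include <pthread.h>
	#include <stdbool.h>

	struct ctrl {
		pthread_mutex_t lock;
		bool is_suspended;
		int pending;   /* stand-in for the pending descriptor list */
		int usecount;  /* stand-in for the runtime-PM reference count */
	};

	static void run_queue(struct ctrl *c)  /* caller holds c->lock */
	{
		while (c->pending > 0) {
			c->pending--;
			c->usecount++;  /* paired with the drop on completion */
			/* ... write descriptor to the hardware queue ... */
		}
	}

	static void issue_pending(struct ctrl *c)
	{
		pthread_mutex_lock(&c->lock);
		c->pending++;
		if (!c->is_suspended)
			run_queue(c);
		pthread_mutex_unlock(&c->lock);
	}

	static void runtime_resume(struct ctrl *c)
	{
		pthread_mutex_lock(&c->lock);
		c->is_suspended = false;
		run_queue(c);   /* flush what arrived while suspended */
		pthread_mutex_unlock(&c->lock);
	}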

+ 6 - 13
drivers/dma/pl330.c

@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 {
 	struct pl330_thread *thrd = NULL;
-	unsigned long flags;
 	int chans, i;
 
 	if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 
 	chans = pl330->pcfg.num_chan;
 
-	spin_lock_irqsave(&pl330->lock, flags);
-
 	for (i = 0; i < chans; i++) {
 		thrd = &pl330->channels[i];
 		if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 		thrd = NULL;
 	}
 
-	spin_unlock_irqrestore(&pl330->lock, flags);
-
 	return thrd;
 }
 
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
 static void pl330_release_channel(struct pl330_thread *thrd)
 {
 	struct pl330_dmac *pl330;
-	unsigned long flags;
 
 	if (!thrd || thrd->free)
 		return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
 
 	pl330 = thrd->dmac;
 
-	spin_lock_irqsave(&pl330->lock, flags);
 	_free_event(thrd, thrd->ev);
 	thrd->free = true;
-	spin_unlock_irqrestore(&pl330->lock, flags);
 }
 
 /* Initialize the structure for PL330 configuration, that can be used
@@ -2122,20 +2114,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 	dma_cookie_init(chan);
 	pch->cyclic = false;
 
 	pch->thread = pl330_request_channel(pl330);
 	if (!pch->thread) {
-		spin_unlock_irqrestore(&pch->lock, flags);
+		spin_unlock_irqrestore(&pl330->lock, flags);
 		return -ENOMEM;
 	}
 
 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 
 	return 1;
 }
@@ -2238,12 +2230,13 @@ static int pl330_pause(struct dma_chan *chan)
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 
 	tasklet_kill(&pch->task);
 
 	pm_runtime_get_sync(pch->dmac->ddma.dev);
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 	pl330_release_channel(pch->thread);
 	pch->thread = NULL;
@@ -2251,7 +2244,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	if (pch->cyclic)
 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
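
The pl330 fix is a lock-scoping correction: channel threads and their events are controller-wide state, so allocating and freeing them must be serialized by pl330->lock rather than the per-channel pch->lock, and the helpers now rely on the caller holding it. A minimal model of that "caller holds the device lock" convention:

	#include <pthread.h>
	#include <stdbool.h>

	struct dmac {
		pthread_mutex_t lock;      /* controller-wide, guards channels[] */
		bool free[8];              /* stand-in for per-thread free flags */
	};

	/* Lock-free by convention: the caller must hold dmac->lock, because
	 * the free[] array is shared by every channel of the device. */
	static int request_channel(struct dmac *d)
	{
		for (int i = 0; i < 8; i++) {
			if (d->free[i]) {
				d->free[i] = false;
				return i;
			}
		}
		return -1;
	}

	static int alloc_chan_resources(struct dmac *d)
	{
		pthread_mutex_lock(&d->lock);  /* device lock, not a per-channel one */
		int thrd = request_channel(d);
		pthread_mutex_unlock(&d->lock);
		return thrd;
	}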

+ 3 - 11
drivers/firmware/efi/libstub/fdt.c

@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
 struct exit_boot_struct {
 	efi_memory_desc_t *runtime_map;
 	int *runtime_entry_count;
+	void *new_fdt_addr;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
 	efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
 			p->runtime_map, p->runtime_entry_count);
 
-	return EFI_SUCCESS;
+	return update_fdt_memmap(p->new_fdt_addr, map);
 }
 
 /*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
 	priv.runtime_map = runtime_map;
 	priv.runtime_entry_count = &runtime_entry_count;
+	priv.new_fdt_addr = (void *)*new_fdt_addr;
 	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
 					exit_boot_func);
 
 	if (status == EFI_SUCCESS) {
 		efi_set_virtual_address_map_t *svam;
 
-		status = update_fdt_memmap((void *)*new_fdt_addr, &map);
-		if (status != EFI_SUCCESS) {
-			/*
-			 * The kernel won't get far without the memory map, but
-			 * may still be able to print something meaningful so
-			 * return success here.
-			 */
-			return EFI_SUCCESS;
-		}
-
 		/* Install the new virtual address map */
 		svam = sys_table->runtime->set_virtual_address_map;
 		status = svam(runtime_entry_count * desc_size, desc_size,
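
The libstub change moves the FDT memory-map update into the exit_boot_services() callback, where the final memory map is in hand and can no longer change underneath us; the new new_fdt_addr field threads the destination through the priv cookie. A skeletal sketch of that callback-plus-context shape (signatures invented, not the EFI stub API):

	#include <stddef.h>

	struct exit_boot_priv {
		void *new_fdt_addr;   /* destination carried into the callback */
	};

	typedef int (*exit_boot_fn)(void *priv, const void *final_map);

	static int update_fdt_memmap(void *fdt, const void *map)
	{
		(void)fdt; (void)map;
		/* ... copy the map into the device tree ... */
		return 0;
	}

	/* The final map only exists inside the callback, so anything that
	 * must see it (here, the FDT update) is reached via priv. */
	static int exit_boot_func(void *priv, const void *final_map)
	{
		struct exit_boot_priv *p = priv;

		return update_fdt_memmap(p->new_fdt_addr, final_map);
	}

	static int exit_boot_services(void *priv, exit_boot_fn fn)
	{
		const void *final_map = NULL;  /* produced here, valid only now */
		return fn(priv, final_map);
	}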

+ 3 - 1
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	}
 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_set_vga_render_state(adev, false);
+
 	gmc_v6_0_mc_stop(adev, &save);
 
 	if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
 	gmc_v6_0_mc_resume(adev, &save);
-	amdgpu_display_set_vga_render_state(adev, false);
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)

+ 8 - 5
drivers/gpu/drm/drm_atomic.c

@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
 	}
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct drm_pending_vblank_event *event = crtc_state->event;
 		/*
-		 * TEST_ONLY and PAGE_FLIP_EVENT are mutually
-		 * exclusive, if they weren't, this code should be
-		 * called on success for TEST_ONLY too.
+		 * Free the allocated event. drm_atomic_helper_setup_commit
+		 * can allocate an event too, so only free it if it's ours
+		 * to prevent a double free in drm_atomic_state_clear.
 		 */
-		if (crtc_state->event)
-			drm_event_cancel_free(dev, &crtc_state->event->base);
+		if (event && (event->base.fence || event->base.file_priv)) {
+			drm_event_cancel_free(dev, &event->base);
+			crtc_state->event = NULL;
+		}
 	}
 
 	if (!fence_state)
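
The double free being fixed: drm_atomic_helper_setup_commit() can allocate crtc_state->event itself, and drm_atomic_state_clear() will free that one, so this path must only cancel events it created — identified by the fence/file_priv markers that only ioctl-allocated events carry. The ownership test in miniature (a sketch, not the DRM API):

	#include <stdbool.h>
	#include <stdlib.h>

	struct event {
		void *fence;      /* set only on events created for the ioctl */
		void *file_priv;  /* likewise */
	};

	/* Only release events this path allocated; helper-allocated ones are
	 * cleaned up by the helper, so freeing here would double free. */
	static void cancel_our_event(struct event **slot)
	{
		struct event *e = *slot;

		if (e && (e->fence || e->file_priv)) {
			free(e);
			*slot = NULL;
		}
	}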

+ 0 - 9
drivers/gpu/drm/drm_atomic_helper.c

@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
 		funcs = plane->helper_private;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-			continue;
-
 		if (funcs->prepare_fb) {
 			ret = funcs->prepare_fb(plane, plane_state);
 			if (ret)
@@ -1685,9 +1682,6 @@ fail:
 		if (j >= i)
 			continue;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-			continue;
-
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 	for_each_plane_in_state(old_state, plane, plane_state, i) {
 		const struct drm_plane_helper_funcs *funcs;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
-			continue;
-
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)

+ 18 - 5
drivers/gpu/drm/drm_connector.c

@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
 
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
+	mutex_init(&connector->mutex);
 	connector->edid_blob_ptr = NULL;
 	connector->status = connector_status_unknown;
 
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
 		connector->funcs->atomic_destroy_state(connector,
 						       connector->state);
 
+	mutex_destroy(&connector->mutex);
+
 	memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
  */
 int drm_connector_register(struct drm_connector *connector)
 {
-	int ret;
+	int ret = 0;
 
-	if (connector->registered)
+	if (!connector->dev->registered)
 		return 0;
 
+	mutex_lock(&connector->mutex);
+	if (connector->registered)
+		goto unlock;
+
 	ret = drm_sysfs_connector_add(connector);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	ret = drm_debugfs_connector_add(connector);
 	if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
 	drm_mode_object_register(connector->dev, &connector->base);
 
 	connector->registered = true;
-	return 0;
+	goto unlock;
 
 err_debugfs:
 	drm_debugfs_connector_remove(connector);
 err_sysfs:
 	drm_sysfs_connector_remove(connector);
+unlock:
+	mutex_unlock(&connector->mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
-	if (!connector->registered)
+	mutex_lock(&connector->mutex);
+	if (!connector->registered) {
+		mutex_unlock(&connector->mutex);
 		return;
+	}
 
 	if (connector->funcs->early_unregister)
 		connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
 	drm_debugfs_connector_remove(connector);
 
 	connector->registered = false;
+	mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);
 
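The connector patch (together with the dev->registered flag added in drm_drv.c below) makes registration idempotent and race-free: both paths take a per-connector mutex, check the registered flag, and bail out or roll back under the same lock. A pthread rendition of the pattern:

	#include <pthread.h>
	#include <stdbool.h>

	struct connector {
		pthread_mutex_t mutex;
		bool registered;
	};

	/* register/unregister can race (hotplug vs. driver teardown); the
	 * per-connector mutex makes both idempotent and mutually ordered. */
	static int connector_register(struct connector *c)
	{
		int ret = 0;

		pthread_mutex_lock(&c->mutex);
		if (c->registered)
			goto unlock;

		/* ... sysfs/debugfs registration, with error unwinding ... */
		c->registered = true;
	unlock:
		pthread_mutex_unlock(&c->mutex);
		return ret;
	}

	static void connector_unregister(struct connector *c)
	{
		pthread_mutex_lock(&c->mutex);
		if (c->registered) {
			/* ... tear down sysfs/debugfs ... */
			c->registered = false;
		}
		pthread_mutex_unlock(&c->mutex);
	}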

+ 4 - 0
drivers/gpu/drm/drm_drv.c

@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto err_minors;
 
+	dev->registered = true;
+
 	if (dev->driver->load) {
 		ret = dev->driver->load(dev, flags);
 		if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
 
 	drm_lastclose(dev);
 
+	dev->registered = false;
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_modeset_unregister_all(dev);
 

+ 4 - 12
drivers/gpu/drm/i915/i915_drv.h

@@ -1012,6 +1012,8 @@ struct intel_fbc {
 	struct work_struct underrun_work;
 
 	struct intel_fbc_state_cache {
+		struct i915_vma *vma;
+
 		struct {
 			unsigned int mode_flags;
 			uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
 		} plane;
 
 		struct {
-			u64 ilk_ggtt_offset;
 			uint32_t pixel_format;
 			unsigned int stride;
-			int fence_reg;
-			unsigned int tiling_mode;
 		} fb;
 	} state_cache;
 
 	struct intel_fbc_reg_params {
+		struct i915_vma *vma;
+
 		struct {
 			enum pipe pipe;
 			enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
 		} crtc;
 
 		struct {
-			u64 ggtt_offset;
 			uint32_t pixel_format;
 			unsigned int stride;
-			int fence_reg;
 		} fb;
 
 		int cfb_size;
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
 	return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
-			    const struct i915_ggtt_view *view)
-{
-	return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
 /* i915_gem_fence_reg.c */
 int __must_check i915_vma_get_fence(struct i915_vma *vma);
 int __must_check i915_vma_put_fence(struct i915_vma *vma);

+ 20 - 0
drivers/gpu/drm/i915/intel_atomic_plane.c

@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 
 	__drm_atomic_helper_plane_duplicate_state(plane, state);
 
+	intel_state->vma = NULL;
+
 	return state;
 }
 
@@ -100,6 +102,24 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
 			  struct drm_plane_state *state)
 {
+	struct i915_vma *vma;
+
+	vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+	/*
+	 * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+	 * We currently don't clear all planes during driver unload, so we have
+	 * to be able to unpin vma here for now.
+	 *
+	 * Normally this can only happen during unload when kmscon is disabled
+	 * and userspace doesn't attempt to set a framebuffer at all.
+	 */
+	if (vma) {
+		mutex_lock(&plane->dev->struct_mutex);
+		intel_unpin_fb_vma(vma);
+		mutex_unlock(&plane->dev->struct_mutex);
+	}
+
 	drm_atomic_helper_plane_destroy_state(plane, state);
 }
 
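The new rule in i915: the plane state owns the framebuffer's VMA reference from prepare_fb to cleanup_fb, and teardown paths steal the pointer with fetch_and_zero() so the unpin happens exactly once even when destroy runs instead of the normal cleanup. A stand-alone approximation (the macro mimics the kernel's fetch_and_zero() for pointer fields; it uses the GNU statement-expression extension, as the kernel does):

	#include <stddef.h>

	/* Take ownership of a pointer and leave NULL behind, so the
	 * cleanup below runs exactly once no matter which path got here. */
	#define fetch_and_zero(ptr) ({            \
		__typeof__(*(ptr)) __v = *(ptr);  \
		*(ptr) = NULL;                    \
		__v;                              \
	})

	struct vma;
	static void unpin_vma(struct vma *v) { (void)v; /* ... */ }

	struct plane_state {
		struct vma *vma;
	};

	static void destroy_state(struct plane_state *s)
	{
		struct vma *v = fetch_and_zero(&s->vma);

		if (v)          /* may legitimately be NULL during unload */
			unpin_vma(v);
	}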

+ 42 - 83
drivers/gpu/drm/i915/intel_display.c

@@ -2235,27 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 			i915_vma_pin_fence(vma);
 	}
 
+	i915_vma_get(vma);
 err:
 	intel_runtime_pm_put(dev_priv);
 	return vma;
 }
 
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct i915_ggtt_view view;
-	struct i915_vma *vma;
-
-	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
-	intel_fill_fb_ggtt_view(&view, fb, rotation);
-	vma = i915_gem_object_to_ggtt(obj, &view);
+	lockdep_assert_held(&vma->vm->dev->struct_mutex);
 
 	if (WARN_ON_ONCE(!vma))
 		return;
 
 	i915_vma_unpin_fence(vma);
 	i915_gem_object_unpin_from_display_plane(vma);
+	i915_vma_put(vma);
 }
 
 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2750,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *c;
-	struct intel_crtc *i;
 	struct drm_i915_gem_object *obj;
 	struct drm_plane *primary = intel_crtc->base.primary;
 	struct drm_plane_state *plane_state = primary->state;
@@ -2775,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	 * an fb with another CRTC instead
 	 */
 	for_each_crtc(dev, c) {
-		i = to_intel_crtc(c);
+		struct intel_plane_state *state;
 
 		if (c == &intel_crtc->base)
 			continue;
 
-		if (!i->active)
+		if (!to_intel_crtc(c)->active)
 			continue;
 
-		fb = c->primary->fb;
-		if (!fb)
+		state = to_intel_plane_state(c->primary->state);
+		if (!state->vma)
 			continue;
 
-		obj = intel_fb_obj(fb);
-		if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+		if (intel_plane_ggtt_offset(state) == plane_config->base) {
+			fb = c->primary->fb;
 			drm_framebuffer_reference(fb);
 			goto valid_fb;
 		}
@@ -2809,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	return;
 
 valid_fb:
+	mutex_lock(&dev->struct_mutex);
+	intel_state->vma =
+		intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR(intel_state->vma)) {
+		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+			  intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+		intel_state->vma = NULL;
+		drm_framebuffer_unreference(fb);
+		return;
+	}
+
 	plane_state->src_x = 0;
 	plane_state->src_y = 0;
 	plane_state->src_w = fb->width << 16;
@@ -3104,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_GEN(dev_priv) >= 4) {
 		I915_WRITE(DSPSURF(plane),
-			   intel_fb_gtt_offset(fb, rotation) +
+			   intel_plane_ggtt_offset(plane_state) +
 			   intel_crtc->dspaddr_offset);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE(DSPLINOFF(plane), linear_offset);
 	} else {
 		I915_WRITE(DSPADDR(plane),
-			   intel_fb_gtt_offset(fb, rotation) +
+			   intel_plane_ggtt_offset(plane_state) +
 			   intel_crtc->dspaddr_offset);
 	}
 	POSTING_READ(reg);
@@ -3207,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	I915_WRITE(DSPSURF(plane),
-		   intel_fb_gtt_offset(fb, rotation) +
+		   intel_plane_ggtt_offset(plane_state) +
 		   intel_crtc->dspaddr_offset);
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3230,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
 	}
 }
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
-			unsigned int rotation)
-{
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct i915_ggtt_view view;
-	struct i915_vma *vma;
-
-	intel_fill_fb_ggtt_view(&view, fb, rotation);
-
-	vma = i915_gem_object_to_ggtt(obj, &view);
-	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-		 view.type))
-		return -1;
-
-	return i915_ggtt_offset(vma);
-}
-
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
@@ -3441,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	}
 
 	I915_WRITE(PLANE_SURF(pipe, 0),
-		   intel_fb_gtt_offset(fb, rotation) + surf_addr);
+		   intel_plane_ggtt_offset(plane_state) + surf_addr);
 
 	POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -11536,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 		flush_work(&work->mmio_work);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+	intel_unpin_fb_vma(work->old_vma);
 	i915_gem_object_put(work->pending_flip_obj);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -12246,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup_pending;
 	}
 
-	work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
-	work->gtt_offset += intel_crtc->dspaddr_offset;
+	work->old_vma = to_intel_plane_state(primary->state)->vma;
+	to_intel_plane_state(primary->state)->vma = vma;
+
+	work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
 	work->rotation = crtc->primary->state->rotation;
 
 	/*
@@ -12301,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_request:
 	i915_add_request_no_flush(request);
 cleanup_unpin:
-	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+	to_intel_plane_state(primary->state)->vma = work->old_vma;
+	intel_unpin_fb_vma(vma);
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
 unlock:
@@ -14794,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 			DRM_DEBUG_KMS("failed to pin object\n");
 			return PTR_ERR(vma);
 		}
+
+		to_intel_plane_state(new_state)->vma = vma;
 	}
 
 	return 0;
@@ -14812,19 +14807,12 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
 		       struct drm_plane_state *old_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(plane->dev);
-	struct intel_plane_state *old_intel_state;
-	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
-	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
-	old_intel_state = to_intel_plane_state(old_state);
-
-	if (!obj && !old_obj)
-		return;
+	struct i915_vma *vma;
 
-	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
-	    !INTEL_INFO(dev_priv)->cursor_needs_physical))
-		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+	/* Should only be called after a successful intel_prepare_plane_fb()! */
+	vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+	if (vma)
+		intel_unpin_fb_vma(vma);
 }
 
 int
@@ -15166,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
 	if (!obj)
 		addr = 0;
 	else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
-		addr = i915_gem_object_ggtt_offset(obj, NULL);
+		addr = intel_plane_ggtt_offset(state);
 	else
 		addr = obj->phys_handle->busaddr;
 
@@ -17066,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev)
 void intel_modeset_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *c;
-	struct drm_i915_gem_object *obj;
 
 	intel_init_gt_powersave(dev_priv);
 
 	intel_modeset_init_hw(dev);
 
 	intel_setup_overlay(dev_priv);
-
-	/*
-	 * Make sure any fbs we allocated at startup are properly
-	 * pinned & fenced.  When we do the allocation it's too early
-	 * for this.
-	 */
-	for_each_crtc(dev, c) {
-		struct i915_vma *vma;
-
-		obj = intel_fb_obj(c->primary->fb);
-		if (obj == NULL)
-			continue;
-
-		mutex_lock(&dev->struct_mutex);
-		vma = intel_pin_and_fence_fb_obj(c->primary->fb,
-						 c->primary->state->rotation);
-		mutex_unlock(&dev->struct_mutex);
-		if (IS_ERR(vma)) {
-			DRM_ERROR("failed to pin boot fb on pipe %d\n",
-				  to_intel_crtc(c)->pipe);
-			drm_framebuffer_unreference(c->primary->fb);
-			c->primary->fb = NULL;
-			c->primary->crtc = c->primary->state->crtc = NULL;
-			update_state_fb(c->primary);
-			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
-		}
-	}
 }
 
 int intel_connector_register(struct drm_connector *connector)

+ 7 - 2
drivers/gpu/drm/i915/intel_drv.h

@@ -377,6 +377,7 @@ struct intel_atomic_state {
 struct intel_plane_state {
 	struct drm_plane_state base;
 	struct drm_rect clip;
+	struct i915_vma *vma;
 
 	struct {
 		u32 offset;
@@ -1046,6 +1047,7 @@ struct intel_flip_work {
 	struct work_struct mmio_work;
 
 	struct drm_crtc *crtc;
+	struct i915_vma *old_vma;
 	struct drm_framebuffer *old_fb;
 	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
@@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct drm_modeset_acquire_ctx *ctx);
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+	return i915_ggtt_offset(state->vma);
+}
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);

+ 20 - 32
drivers/gpu/drm/i915/intel_fbc.c

@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 	if (IS_I945GM(dev_priv))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-	fbc_ctl |= params->fb.fence_reg;
+	fbc_ctl |= params->vma->fence->id;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
 	else
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
-		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+	if (params->vma->fence) {
+		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
 		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
 	} else {
 		I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+	if (params->vma->fence) {
 		dpfc_ctl |= DPFC_CTL_FENCE_EN;
 		if (IS_GEN5(dev_priv))
-			dpfc_ctl |= params->fb.fence_reg;
+			dpfc_ctl |= params->vma->fence->id;
 		if (IS_GEN6(dev_priv)) {
 			I915_WRITE(SNB_DPFC_CTL_SA,
-				   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+				   SNB_CPU_FENCE_ENABLE |
+				   params->vma->fence->id);
 			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
 				   params->crtc.fence_y_offset);
 		}
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 	}
 
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-	I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+	I915_WRITE(ILK_FBC_RT_BASE,
+		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+	if (params->vma->fence) {
 		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 		I915_WRITE(SNB_DPFC_CTL_SA,
-			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+			   SNB_CPU_FENCE_ENABLE |
+			   params->vma->fence->id);
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
 	} else {
 		I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 	return effective_w <= max_w && effective_h <= max_h;
 }
 
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
-	struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
-	return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 					 struct intel_crtc_state *crtc_state,
 					 struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_fbc_state_cache *cache = &fbc->state_cache;
 	struct drm_framebuffer *fb = plane_state->base.fb;
-	struct drm_i915_gem_object *obj;
+
+	cache->vma = NULL;
 
 	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	if (!cache->plane.visible)
 		return;
 
-	obj = intel_fb_obj(fb);
-
-	/* FIXME: We lack the proper locking here, so only run this on the
-	 * platforms that need. */
-	if (IS_GEN(dev_priv, 5, 6))
-		cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
 	cache->fb.pixel_format = fb->pixel_format;
 	cache->fb.stride = fb->pitches[0];
-	cache->fb.fence_reg = get_fence_id(fb);
-	cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+	cache->vma = plane_state->vma;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 		return false;
 	}
 
-	if (!cache->plane.visible) {
+	if (!cache->vma) {
 		fbc->no_fbc_reason = "primary plane not visible";
 		return false;
 	}
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	 * so have no fence associated with it) due to aperture constaints
 	 * at the time of pinning.
 	 */
-	if (cache->fb.tiling_mode != I915_TILING_X ||
-	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+	if (!cache->vma->fence) {
 		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
 		return false;
 	}
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
 	 * zero. */
 	memset(params, 0, sizeof(*params));
 
+	params->vma = cache->vma;
+
 	params->crtc.pipe = crtc->pipe;
 	params->crtc.plane = crtc->plane;
 	params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
 
 	params->fb.pixel_format = cache->fb.pixel_format;
 	params->fb.stride = cache->fb.stride;
-	params->fb.fence_reg = cache->fb.fence_reg;
 
 	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
-	params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
 }
 }
 
 static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
+ 2 - 2
drivers/gpu/drm/i915/intel_fbdev.c

@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unpin:
+	intel_unpin_fb_vma(vma);
 out_unlock:
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 
 
 	if (ifbdev->fb) {
 		mutex_lock(&ifbdev->helper.dev->struct_mutex);
+		intel_unpin_fb_vma(ifbdev->vma);
 		mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 		mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
 		drm_framebuffer_remove(&ifbdev->fb->base);
+ 4 - 4
drivers/gpu/drm/i915/intel_sprite.c

@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 
 
 	I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
 	I915_WRITE(PLANE_SURF(pipe, plane),
+		   intel_plane_ggtt_offset(plane_state) + surf_addr);
 	POSTING_READ(PLANE_SURF(pipe, plane));
 	POSTING_READ(PLANE_SURF(pipe, plane));
 }
 
 	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
 	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
 	I915_WRITE(SPCNTR(pipe, plane), sprctl);
 	I915_WRITE(SPSURF(pipe, plane),
+		   intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 	POSTING_READ(SPSURF(pipe, plane));
 	POSTING_READ(SPSURF(pipe, plane));
 }
 
 		I915_WRITE(SPRSCALE(pipe), sprscale);
 		I915_WRITE(SPRSCALE(pipe), sprscale);
 	I915_WRITE(SPRCTL(pipe), sprctl);
 	I915_WRITE(SPRSURF(pipe),
+		   intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 	POSTING_READ(SPRSURF(pipe));
 	POSTING_READ(SPRSURF(pipe));
 }
 
 	I915_WRITE(DVSSCALE(pipe), dvsscale);
 	I915_WRITE(DVSSCALE(pipe), dvsscale);
 	I915_WRITE(DVSCNTR(pipe), dvscntr);
 	I915_WRITE(DVSSURF(pipe),
+		   intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
 	POSTING_READ(DVSSURF(pipe));
 	POSTING_READ(DVSSURF(pipe));
 }
 
+ 2 - 1
drivers/gpu/drm/nouveau/dispnv04/hw.c

@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
 		uint32_t mpllP;
 		uint32_t mpllP;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
 		if (!mpllP)
 		if (!mpllP)
 			mpllP = 4;
 
 		uint32_t clock;
 		uint32_t clock;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+		return clock / 1000;
 	}
 	}
 
 	ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+ 1 - 0
drivers/gpu/drm/nouveau/nouveau_fence.h

@@ -99,6 +99,7 @@ struct nv84_fence_priv {
 	struct nouveau_bo *bo;
 	struct nouveau_bo *bo;
 	struct nouveau_bo *bo_gart;
 	u32 *suspend;
 };
 };
 
 int  nv84_fence_context_new(struct nouveau_channel *);
+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_led.h

@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
 }
 }
 
 /* nouveau_led.c */
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
 int  nouveau_led_init(struct drm_device *dev);
 int  nouveau_led_init(struct drm_device *dev);
 void nouveau_led_suspend(struct drm_device *dev);
 void nouveau_led_resume(struct drm_device *dev);
+ 2 - 1
drivers/gpu/drm/nouveau/nouveau_usif.c

@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
 	if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
 	if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
 		/* block access to objects not created via this interface */
 		owner = argv->v0.owner;
+		if (argv->v0.object == 0ULL &&
+		    argv->v0.type != NVIF_IOCTL_V0_DEL)
 			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
 			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
 		else
 			argv->v0.owner = NVDRM_OBJECT_USIF;
+ 6 - 0
drivers/gpu/drm/nouveau/nv50_display.c

@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 		}
 	}
 
+		if (crtc->state->event)
+			drm_crtc_vblank_get(crtc);
+	}
+
 	/* Update plane(s). */
 	for_each_plane_in_state(state, plane, plane_state, i) {
 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 			drm_crtc_send_vblank_event(crtc, crtc->state->event);
 			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 			crtc->state->event = NULL;
+			drm_crtc_vblank_put(crtc);
 		}
 	}
 
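The nv50 change brackets event-carrying commits with a vblank reference, so the vblank interrupt cannot be disabled between programming the update and delivering the event. The shape of the get/put pairing, modelled loosely:

	#include <stdbool.h>

	struct crtc {
		bool event_pending;
		int vblank_refs;
	};

	/* Hold a vblank reference for exactly the window in which an event
	 * will be delivered: get before the update is committed, put after
	 * the event has been sent, so the interrupt stays armed meanwhile. */
	static void commit_tail(struct crtc *c)
	{
		if (c->event_pending)
			c->vblank_refs++;          /* drm_crtc_vblank_get() */

		/* ... program the hardware update, wait for flips ... */

		if (c->event_pending) {
			/* ... send the vblank event ... */
			c->event_pending = false;
			c->vblank_refs--;          /* drm_crtc_vblank_put() */
		}
	}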

+ 6 - 0
drivers/gpu/drm/nouveau/nv84_fence.c

@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 	struct nv84_fence_chan *fctx = chan->fence;
 
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+	mutex_lock(&priv->mutex);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	mutex_unlock(&priv->mutex);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
 	nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.sync32 = nv84_fence_sync32;
 	fctx->base.sequence = nv84_fence_read(chan);
 
+	mutex_lock(&priv->mutex);
 	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
 	if (ret == 0) {
 		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
 					&fctx->vma_gart);
 	}
+	mutex_unlock(&priv->mutex);
 
 	if (ret)
 		nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
+	mutex_init(&priv->mutex);
+
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
 	domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
 	domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
 			 /*
 			 /*

+ 1 - 1
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c

@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
 			);
 		}
 		for (i = 0; i < size; i++)
-			nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+			nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
 		for (; i < 0x60; i++)
 			nvkm_wr32(device, 0x61c440 + soff, (i << 8));
 		nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);

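The one-character hdagt215.c fix above is a classic copy-paste bug: the loop wrote args->v0.data[0] for every ELD byte instead of indexing with the loop counter, so the HDA ELD buffer was filled with the first byte repeated. Isolated for emphasis:

	/* Buggy: every register write pushes byte 0 of the source buffer. */
	for (i = 0; i < size; i++)
		nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);

	/* Fixed: the loop counter indexes the source as well. */
	for (i = 0; i < size; i++)
		nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
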
+ 0 - 2
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c

@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
 	case 0x94:
 	case 0x96:
 	case 0x98:
-	case 0xaa:
-	case 0xac:
 		return true;
 	default:
 		break;

+ 2 - 1
drivers/gpu/drm/radeon/radeon_drv.c

@@ -97,9 +97,10 @@
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  *   2.47.0 - Add UVD_NO_OP register support
  *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ *   2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	48
+#define KMS_DRIVER_MINOR	49
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);

+ 2 - 2
drivers/gpu/drm/radeon/radeon_gem.c

@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
-	args->vram_size = rdev->mc.real_vram_size;
-	args->vram_visible = (u64)man->size << PAGE_SHIFT;
+	args->vram_size = (u64)man->size << PAGE_SHIFT;
+	args->vram_visible = rdev->mc.visible_vram_size;
 	args->vram_visible -= rdev->vram_pin_size;
 	args->gart_size = rdev->mc.gtt_size;
 	args->gart_size -= rdev->gart_pin_size;

+ 12 - 16
drivers/hid/hid-cp2112.c

@@ -168,7 +168,7 @@ struct cp2112_device {
 	atomic_t xfer_avail;
 	struct gpio_chip gc;
 	u8 *in_out_buffer;
-	spinlock_t lock;
+	struct mutex lock;
 
 	struct gpio_desc *desc[8];
 	bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);
 
 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
 				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	ret = 0;
 
 exit:
-	spin_unlock_irqrestore(&dev->lock, flags);
-	return ret <= 0 ? ret : -EIO;
+	mutex_unlock(&dev->lock);
+	return ret < 0 ? ret : -EIO;
 }
 
 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);
 
 	buf[0] = CP2112_GPIO_SET;
 	buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	if (ret < 0)
 		hid_err(hdev, "error setting GPIO values: %d\n", ret);
 
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 }
 
 static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);
 
 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
 				 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
 	ret = buf[1];
 
 exit:
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 
 	return ret;
 }
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);
 
 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
 				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 		goto fail;
 	}
 
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 
 	/*
 	 * Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 	return 0;
 
 fail:
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 	return ret < 0 ? ret : -EIO;
 }
 
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	if (!dev->in_out_buffer)
 		return -ENOMEM;
 
-	spin_lock_init(&dev->lock);
+	mutex_init(&dev->lock);
 
 	ret = hid_parse(hdev);
 	if (ret) {

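The driver-wide spinlock-to-mutex switch in hid-cp2112.c above is not stylistic: hid_hw_raw_request() may sleep while it waits for the USB transfer, and sleeping with a spinlock held (and interrupts disabled) is a bug. A mutex holder is allowed to sleep, so it is the right primitive when the critical section performs blocking I/O. A minimal before/after sketch — the trailing request-type argument is elided in the hunks above and assumed here to be HID_REQ_GET_REPORT:

	/* Broken: hid_hw_raw_request() can sleep inside the
	 * spin_lock_irqsave() critical section. */
	spin_lock_irqsave(&dev->lock, flags);
	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
				 HID_REQ_GET_REPORT);	/* may sleep */
	spin_unlock_irqrestore(&dev->lock, flags);

	/* Fixed: a mutex permits the holder to sleep. */
	mutex_lock(&dev->lock);
	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
				 HID_REQ_GET_REPORT);
	mutex_unlock(&dev->lock);
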
+ 3 - 0
drivers/hid/hid-ids.h

@@ -76,6 +76,9 @@
 #define USB_VENDOR_ID_ALPS_JP		0x044E
 #define HID_DEVICE_ID_ALPS_U1_DUAL	0x120B
 
+#define USB_VENDOR_ID_AMI		0x046b
+#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE	0xff10
+
 #define USB_VENDOR_ID_ANTON		0x1130
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD	0x3101
 

+ 1 - 1
drivers/hid/hid-lg.c

@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
 		.driver_data = LG_NOGET | LG_FF4 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
-		.driver_data = LG_FF2 },
+		.driver_data = LG_NOGET | LG_FF2 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
 		.driver_data = LG_FF3 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),

+ 1 - 0
drivers/hid/usbhid/hid-quirks.c

@@ -57,6 +57,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },

+ 15 - 13
drivers/hid/wacom_wac.c

@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
 		wacom->id[0] = STYLUS_DEVICE_ID;
 	}
 
-	pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
-	if (features->pressure_max > 255)
-		pressure = (pressure << 1) | ((data[4] >> 6) & 1);
-	pressure += (features->pressure_max + 1) / 2;
-
-	input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
-	input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
-	input_report_abs(input, ABS_PRESSURE, pressure);
-
-	input_report_key(input, BTN_TOUCH, data[4] & 0x08);
-	input_report_key(input, BTN_STYLUS, data[4] & 0x10);
-	/* Only allow the stylus2 button to be reported for the pen tool. */
-	input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+	if (prox) {
+		pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+		if (features->pressure_max > 255)
+			pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+		pressure += (features->pressure_max + 1) / 2;
+
+		input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+		input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+		input_report_abs(input, ABS_PRESSURE, pressure);
+
+		input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+		input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+		/* Only allow the stylus2 button to be reported for the pen tool. */
+		input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+	}
 
 	if (!prox)
 		wacom->id[0] = 0;

+ 1 - 0
drivers/hv/ring_buffer.c

@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 		return ret;
 	}
 
+	init_cached_read_index(channel);
 	next_read_location = hv_get_next_read_location(inring_info);
 	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
 						    sizeof(desc),

+ 2 - 2
drivers/iio/adc/palmas_gpadc.c

@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
 
 static int palmas_gpadc_suspend(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct palmas_gpadc *adc = iio_priv(indio_dev);
 	int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
 	int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
 
 static int palmas_gpadc_resume(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
 	struct palmas_gpadc *adc = iio_priv(indio_dev);
 	int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
 	int ret;

+ 2 - 2
drivers/iio/health/afe4403.c

@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
 
 static int __maybe_unused afe4403_suspend(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
 	struct afe4403_data *afe = iio_priv(indio_dev);
 	int ret;
 
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
 
 static int __maybe_unused afe4403_resume(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
 	struct afe4403_data *afe = iio_priv(indio_dev);
 	int ret;
 

+ 2 - 2
drivers/iio/health/afe4404.c

@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
 
 static int __maybe_unused afe4404_suspend(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
 	struct afe4404_data *afe = iio_priv(indio_dev);
 	int ret;
 
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
 
 static int __maybe_unused afe4404_resume(struct device *dev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
 	struct afe4404_data *afe = iio_priv(indio_dev);
 	int ret;
 

+ 1 - 1
drivers/iio/health/max30100.c

@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
 
 	mutex_lock(&data->lock);
 
-	while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
+	while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
 		ret = max30100_read_measurement(data);
 		if (ret)
 			break;

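The max30100 change is a one-parenthesis operator-precedence fix worth spelling out: in C, the comparison binds tighter than assignment, so `cnt = max30100_fifo_count(data) > 0` assigns the boolean result of the comparison (0 or 1) to cnt rather than the FIFO count. The extra parentheses assign first, then compare. A standalone demonstration (function names here are illustrative stand-ins):

	#include <stdio.h>

	/* stand-in for max30100_fifo_count() */
	static int fifo_count(void)
	{
		return 5;
	}

	int main(void)
	{
		int cnt;

		cnt = fifo_count() > 0;			/* bug: cnt == 1 */
		printf("buggy form: cnt = %d\n", cnt);

		if ((cnt = fifo_count()) > 0)		/* fix: cnt == 5 */
			printf("fixed form: cnt = %d\n", cnt);

		return 0;
	}
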
+ 4 - 2
drivers/iio/humidity/dht11.c

@@ -71,7 +71,8 @@
  * a) select an implementation using busy loop polling on those systems
  * b) use the checksum to do some probabilistic decoding
  */
-#define DHT11_START_TRANSMISSION	18  /* ms */
+#define DHT11_START_TRANSMISSION_MIN	18000  /* us */
+#define DHT11_START_TRANSMISSION_MAX	20000  /* us */
 #define DHT11_MIN_TIMERES	34000  /* ns */
 #define DHT11_THRESHOLD		49000  /* ns */
 #define DHT11_AMBIG_LOW		23000  /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
 		ret = gpio_direction_output(dht11->gpio, 0);
 		if (ret)
 			goto err;
-		msleep(DHT11_START_TRANSMISSION);
+		usleep_range(DHT11_START_TRANSMISSION_MIN,
+			     DHT11_START_TRANSMISSION_MAX);
 		ret = gpio_direction_input(dht11->gpio);
 		if (ret)
 			goto err;

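The dht11 hunk swaps msleep() for usleep_range() and converts the constant from milliseconds to a microsecond window. This follows the advice in Documentation/timers/timers-howto.txt: msleep() is jiffy-granular and can oversleep badly for delays in the 1-20 ms range, while usleep_range() is hrtimer-based and gives an explicit min/max window inside which the scheduler may coalesce wakeups. Sketch of the idiom in driver context:

	#define DHT11_START_TRANSMISSION_MIN	18000	/* us */
	#define DHT11_START_TRANSMISSION_MAX	20000	/* us */

	/* Pull the start line for at least 18 ms but not much more;
	 * a jiffy-granular msleep(18) could overshoot that bound. */
	usleep_range(DHT11_START_TRANSMISSION_MIN,
		     DHT11_START_TRANSMISSION_MAX);
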
+ 2 - 2
drivers/input/rmi4/rmi_driver.c

@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
 	data->enabled = true;
 	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
 		retval = disable_irq_wake(irq);
-		if (!retval)
+		if (retval)
 			dev_warn(&rmi_dev->dev,
 				 "Failed to disable irq for wake: %d\n",
 				 retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
 	disable_irq(irq);
 	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
 		retval = enable_irq_wake(irq);
-		if (!retval)
+		if (retval)
 			dev_warn(&rmi_dev->dev,
 				 "Failed to enable irq for wake: %d\n",
 				 retval);

+ 1 - 1
drivers/input/touchscreen/wm97xx-core.c

@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
 	}
 	platform_set_drvdata(wm->battery_dev, wm);
 	wm->battery_dev->dev.parent = dev;
-	wm->battery_dev->dev.platform_data = pdata->batt_pdata;
+	wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
 	ret = platform_device_add(wm->battery_dev);
 	if (ret < 0)
 		goto batt_reg_err;

+ 2 - 1
drivers/mmc/host/sdhci.c

@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
 		if (intmask & SDHCI_INT_RETUNE)
 			mmc_retune_needed(host->mmc);
 
-		if (intmask & SDHCI_INT_CARD_INT) {
+		if ((intmask & SDHCI_INT_CARD_INT) &&
+		    (host->ier & SDHCI_INT_CARD_INT)) {
 			sdhci_enable_sdio_irq_nolock(host, false);
 			host->thread_isr |= SDHCI_INT_CARD_INT;
 			result = IRQ_WAKE_THREAD;

+ 43 - 2
drivers/net/ethernet/adaptec/starfire.c

@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned int entry;
+	unsigned int prev_tx;
 	u32 status;
-	int i;
+	int i, j;
 
 	/*
 	 * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
 	entry = np->cur_tx % TX_RING_SIZE;
 	for (i = 0; i < skb_num_frags(skb); i++) {
 		int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 					       skb_frag_size(this_frag),
 					       PCI_DMA_TODEVICE);
 		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
 		np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
+						&skb_shinfo(skb)->frags[j-1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
 				break;	/* Better luck next round. */
 			np->rx_info[entry].mapping =
 				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						np->rx_info[entry].mapping)) {
+				dev_kfree_skb(skb);
+				np->rx_info[entry].skb = NULL;
+				break;
+			}
 			np->rx_ring[entry].rxaddr =
 				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
 		}

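All three starfire hunks add the same missing step: a pci_map_single() call can fail, and a descriptor must never be handed to the device with an unchecked mapping. The pattern is to test with pci_dma_mapping_error() immediately after mapping and to unwind (free the skb, drop the packet, roll back cur_tx) instead of continuing. A condensed sketch of the RX side, with variable names following the driver:

	skb = netdev_alloc_skb(dev, np->rx_buf_sz);
	if (!skb)
		return;				/* retry on the next refill */

	mapping = pci_map_single(np->pci_dev, skb->data,
				 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(np->pci_dev, mapping)) {
		dev_kfree_skb(skb);		/* never post a bad address */
		return;
	}
	/* only now is it safe to publish the descriptor to the NIC */
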
+ 130 - 58
drivers/net/ethernet/cadence/macb.c

@@ -43,13 +43,13 @@
 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE	64
 #define MAX_RX_RING_SIZE	8192
-#define RX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->rx_ring_size)
 
 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE	64
 #define MAX_TX_RING_SIZE	4096
-#define TX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
  */
 #define MACB_HALT_TIMEOUT	1230
 
+/* DMA buffer descriptor might be different size
+ * depends on hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+	return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* Dma buffer descriptor is 4 words length (instead of 2 words)
+	 * for 64b GEM.
+	 */
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		idx <<= 1;
+#endif
+	return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+	return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
 /* Ring buffer accessors */
 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 					  unsigned int index)
 {
-	return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+	index = macb_tx_ring_wrap(queue->bp, index);
+	index = macb_adj_dma_desc_idx(queue->bp, index);
+	return &queue->tx_ring[index];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 	dma_addr_t offset;
 
 	offset = macb_tx_ring_wrap(queue->bp, index) *
-		 sizeof(struct macb_dma_desc);
+			macb_dma_desc_get_size(queue->bp);
 
 	return queue->tx_ring_dma + offset;
 }
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-	return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+	index = macb_rx_ring_wrap(bp, index);
+	index = macb_adj_dma_desc_idx(bp, index);
+	return &bp->rx_ring[index];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
 	}
 }
 
-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
 {
-	desc->addr = (u32)addr;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	desc->addrh = (u32)(addr >> 32);
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		desc_64->addrh = upper_32_bits(addr);
+	}
 #endif
+	desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+	dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		addr = ((u64)(desc_64->addrh) << 32);
+	}
+#endif
+	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+	return addr;
 }
 
 static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
 
 	/* Set end of TX queue */
 	desc = macb_tx_desc(queue, 0);
-	macb_set_addr(desc, 0);
+	macb_set_addr(bp, desc, 0);
 	desc->ctrl = MACB_BIT(TX_USED);
 
 	/* Make descriptor updates visible to hardware */
 	wmb();
 
 	/* Reinitialize the TX desc queue */
-	queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 	/* Make TX ring reflect state of hardware */
 	queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
 	unsigned int		entry;
 	struct sk_buff		*skb;
 	dma_addr_t		paddr;
+	struct macb_dma_desc *desc;
 
 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
 			  bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
 		rmb();
 
 		bp->rx_prepared_head++;
+		desc = macb_rx_desc(bp, entry);
 
 		if (!bp->rx_skbuff[entry]) {
 			/* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
 
 			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
-			macb_set_addr(&(bp->rx_ring[entry]), paddr);
-			bp->rx_ring[entry].ctrl = 0;
+			macb_set_addr(bp, desc, paddr);
+			desc->ctrl = 0;
 
 			/* properly align Ethernet header */
 			skb_reserve(skb, NET_IP_ALIGN);
 		} else {
-			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
-			bp->rx_ring[entry].ctrl = 0;
+			desc->addr &= ~MACB_BIT(RX_USED);
+			desc->ctrl = 0;
 		}
 	}
 
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
 		bool rxused;
 
 		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-		desc = &bp->rx_ring[entry];
+		desc = macb_rx_desc(bp, entry);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		addr = macb_get_addr(bp, desc);
 		ctrl = desc->ctrl;
 
 		if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 static inline void macb_init_rx_ring(struct macb *bp)
 {
 	dma_addr_t addr;
+	struct macb_dma_desc *desc = NULL;
 	int i;
 
 	addr = bp->rx_buffers_dma;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		bp->rx_ring[i].addr = addr;
-		bp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(bp, i);
+		macb_set_addr(bp, desc, addr);
+		desc->ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
-	bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 	bp->rx_tail = 0;
 }
 
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
 
 	for (tail = bp->rx_tail; budget > 0; tail++) {
 		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
-		u32 addr, ctrl;
+		u32 ctrl;
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		addr = desc->addr;
 		ctrl = desc->ctrl;
 
-		if (!(addr & MACB_BIT(RX_USED)))
+		if (!(desc->addr & MACB_BIT(RX_USED)))
 			break;
 
 		if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 	i = tx_head;
 	entry = macb_tx_ring_wrap(bp, i);
 	ctrl = MACB_BIT(TX_USED);
-	desc = &queue->tx_ring[entry];
+	desc = macb_tx_desc(queue, entry);
 	desc->ctrl = ctrl;
 
 	if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 		i--;
 		entry = macb_tx_ring_wrap(bp, i);
 		tx_skb = &queue->tx_skb[entry];
-		desc = &queue->tx_ring[entry];
+		desc = macb_tx_desc(queue, entry);
 
 		ctrl = (u32)tx_skb->size;
 		if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
 
 		/* Set TX buffer descriptor */
-		macb_set_addr(desc, tx_skb->mapping);
+		macb_set_addr(bp, desc, tx_skb->mapping);
 		/* desc->addr must be visible to hardware before clearing
 		 * 'TX_USED' bit in desc->ctrl.
 		 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
 		if (!skb)
 			continue;
 
-		desc = &bp->rx_ring[i];
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		desc = macb_rx_desc(bp, i);
+		addr = macb_get_addr(bp, desc);
+
 		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
 static void gem_init_rings(struct macb *bp)
 {
 	struct macb_queue *queue;
+	struct macb_dma_desc *desc = NULL;
 	unsigned int q;
 	int i;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		for (i = 0; i < bp->tx_ring_size; i++) {
-			queue->tx_ring[i].addr = 0;
-			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+			desc = macb_tx_desc(queue, i);
+			macb_set_addr(bp, desc, 0);
+			desc->ctrl = MACB_BIT(TX_USED);
 		}
-		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+		desc->ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
 	}
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
 static void macb_init_rings(struct macb *bp)
 {
 	int i;
+	struct macb_dma_desc *desc = NULL;
 
 	macb_init_rx_ring(bp);
 
 	for (i = 0; i < bp->tx_ring_size; i++) {
-		bp->queues[0].tx_ring[i].addr = 0;
-		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		desc = macb_tx_desc(&bp->queues[0], i);
+		macb_set_addr(bp, desc, 0);
+		desc->ctrl = MACB_BIT(TX_USED);
 	}
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
-	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+	desc->ctrl |= MACB_BIT(TX_WRAP);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
 			dmacfg &= ~GEM_BIT(TXCOEN);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		dmacfg |= GEM_BIT(ADDR64);
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			dmacfg |= GEM_BIT(ADDR64);
 #endif
 		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
 			   dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
 	macb_configure_dma(bp);
 
 	/* Initialize TX and RX buffers */
-	macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
 #endif
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 
 		/* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR  = GEM_IMR(hw_q - 1);
 			queue->TBQP = GEM_TBQP(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = GEM_TBQPH(hw_q -1);
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
 		} else {
 			/* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR  = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = MACB_TBQPH;
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = MACB_TBQPH;
 #endif
 		}
 
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	dma_addr_t addr;
 	u32 ctl;
 	int i;
 
 	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 					 (AT91ETHER_MAX_RX_DESCR *
-					  sizeof(struct macb_dma_desc)),
+					  macb_dma_desc_get_size(lp)),
 					 &lp->rx_ring_dma, GFP_KERNEL);
 	if (!lp->rx_ring)
 		return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
 	if (!lp->rx_buffers) {
 		dma_free_coherent(&lp->pdev->dev,
 				  AT91ETHER_MAX_RX_DESCR *
-				  sizeof(struct macb_dma_desc),
+				  macb_dma_desc_get_size(lp),
 				  lp->rx_ring, lp->rx_ring_dma);
 		lp->rx_ring = NULL;
 		return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
 
 	addr = lp->rx_buffers_dma;
 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-		lp->rx_ring[i].addr = addr;
-		lp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(lp, i);
+		macb_set_addr(lp, desc, addr);
+		desc->ctrl = 0;
 		addr += AT91ETHER_MAX_RBUFF_SZ;
 	}
 
 	/* Set the Wrap bit on the last descriptor */
-	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 
 	/* Reset buffer index */
 	lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
 
 	dma_free_coherent(&lp->pdev->dev,
 			  AT91ETHER_MAX_RX_DESCR *
-			  sizeof(struct macb_dma_desc),
+			  macb_dma_desc_get_size(lp),
 			  lp->rx_ring, lp->rx_ring_dma);
 	lp->rx_ring = NULL;
 
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	unsigned char *p_recv;
 	struct sk_buff *skb;
 	unsigned int pktlen;
 
-	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+	desc = macb_rx_desc(lp, lp->rx_tail);
+	while (desc->addr & MACB_BIT(RX_USED)) {
 		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
-		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
 		skb = netdev_alloc_skb(dev, pktlen + 2);
 		if (skb) {
 			skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
 			lp->stats.rx_dropped++;
 		}
 
-		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
 			lp->stats.multicast++;
 
 		/* reset ownership bit */
-		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+		desc->addr &= ~MACB_BIT(RX_USED);
 
 		/* wrap after last buffer */
 		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
 			lp->rx_tail = 0;
 		else
 			lp->rx_tail++;
+
+		desc = macb_rx_desc(lp, lp->rx_tail);
 	}
 }
 
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
 	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
 		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+		bp->hw_dma_cap = HW_DMA_CAP_64B;
+	} else
+		bp->hw_dma_cap = HW_DMA_CAP_32B;
 #endif
 
 	spin_lock_init(&bp->lock);

+ 17 - 3
drivers/net/ethernet/cadence/macb.h

@@ -385,6 +385,8 @@
 /* Bitfields in DCFG6. */
 #define GEM_PBUF_LSO_OFFSET			27
 #define GEM_PBUF_LSO_SIZE			1
+#define GEM_DAW64_OFFSET			23
+#define GEM_DAW64_SIZE				1
 
 /* Constants for CLK */
 #define MACB_CLK_DIV8				0
@@ -487,12 +489,20 @@
 struct macb_dma_desc {
 	u32	addr;
 	u32	ctrl;
+};
+
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	u32     addrh;
-	u32     resvd;
-#endif
+enum macb_hw_dma_cap {
+	HW_DMA_CAP_32B,
+	HW_DMA_CAP_64B,
 };
 
+struct macb_dma_desc_64 {
+	u32 addrh;
+	u32 resvd;
+};
+#endif
+
 /* DMA descriptor bitfields */
 #define MACB_RX_USED_OFFSET			0
 #define MACB_RX_USED_SIZE			1
@@ -874,6 +884,10 @@ struct macb {
 	unsigned int		jumbo_max_len;
 
 	u32			wol;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	enum macb_hw_dma_cap hw_dma_cap;
+#endif
 };
 
 static inline bool macb_is_gem(struct macb *bp)

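The macb change is the largest in this set, and the macb.h hunk carries the key idea: the 64-bit address words are no longer unconditional members of struct macb_dma_desc. The base descriptor stays two words, and an optional struct macb_dma_desc_64 trailer is appended only when the GEM hardware advertises 64-bit DMA (the DAW64 bit in DCFG6). All ring arithmetic then goes through macb_dma_desc_get_size() and macb_adj_dma_desc_idx() so one binary drives both layouts, and addresses are split with lower_32_bits()/upper_32_bits() instead of casts and shifts. A reduced sketch of the accessor logic, with the CONFIG_ARCH_DMA_ADDR_T_64BIT guards from the diff omitted for brevity:

	static unsigned int macb_dma_desc_get_size(struct macb *bp)
	{
		/* 4 words per descriptor on 64-bit capable GEM, else 2 */
		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
			return sizeof(struct macb_dma_desc) +
			       sizeof(struct macb_dma_desc_64);
		return sizeof(struct macb_dma_desc);
	}

	static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc,
				  dma_addr_t addr)
	{
		if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
			struct macb_dma_desc_64 *desc_64 = macb_64b_desc(bp, desc);

			desc_64->addrh = upper_32_bits(addr);	/* high word first */
		}
		desc->addr = lower_32_bits(addr);	/* low word arms the descriptor */
	}
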
+ 1 - 2
drivers/net/ethernet/cavium/thunder/thunder_xcv.c

@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
 	int speed = 2;
 
 	if (!xcv) {
-		dev_err(&xcv->pdev->dev,
-			"XCV init not done, probe may have failed\n");
+		pr_err("XCV init not done, probe may have failed\n");
 		return;
 	}
 

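The thunder_xcv hunk removes a use-after-NULL-check: the old code tested !xcv and then dereferenced xcv->pdev inside the very error message reporting that xcv is NULL. Logging through pr_err(), which needs no device pointer, avoids the oops. In sketch form:

	if (!xcv) {
		/* Wrong: dereferences the pointer just proven to be NULL.
		 * dev_err(&xcv->pdev->dev, "XCV init not done\n");
		 */

		/* Right: no device context needed to report the failure. */
		pr_err("XCV init not done, probe may have failed\n");
		return;
	}
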
+ 28 - 5
drivers/net/ethernet/emulex/benet/be_main.c

@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 		status = -EPERM;
 		goto err;
 	}
-done:
+
+	/* Remember currently programmed MAC */
 	ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
 	return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
 {
 	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
 	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT))
+	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
 		be_dev_mac_del(adapter, adapter->pmac_id[0]);
+		eth_zero_addr(adapter->dev_mac);
+	}
 
 	be_clear_uc_list(adapter);
 	be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
 	if (status)
 		return status;
 
-	/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+	/* Normally this condition usually true as the ->dev_mac is zeroed.
+	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
+	 * subsequent be_dev_mac_add() can fail (after fresh boot)
+	 */
+	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+		int old_pmac_id = -1;
+
+		/* Remember old programmed MAC if any - can happen on BE3 VF */
+		if (!is_zero_ether_addr(adapter->dev_mac))
+			old_pmac_id = adapter->pmac_id[0];
+
 		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
 		if (status)
 			return status;
+
+		/* Delete the old programmed MAC as we successfully programmed
+		 * a new MAC
+		 */
+		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+			be_dev_mac_del(adapter, old_pmac_id);
+
 		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
 	}
 
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 
 		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+		/* Initial MAC for BE3 VFs is already programmed by PF */
+		if (BEx_chip(adapter) && be_virtfn(adapter))
+			memcpy(adapter->dev_mac, mac, ETH_ALEN);
 	}
 
 	return 0;

+ 2 - 2
drivers/net/ethernet/freescale/gianfar.c

@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 		if (!rxb->page)
 			continue;
 
-		dma_unmap_single(rx_queue->dev, rxb->dma,
-				 PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(rxb->page);
 
 		rxb->page = NULL;

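The gianfar fix is about API symmetry in the DMA layer: a buffer mapped with dma_map_page() must be released with dma_unmap_page(), not dma_unmap_single(). With CONFIG_DMA_API_DEBUG enabled the mismatch triggers a warning, and on IOMMU-backed systems the bookkeeping can genuinely differ. The rule in sketch form (the mapping side is not shown in the hunk above and is assumed here to be dma_map_page(), matching the page-based RX buffers the code frees):

	/* mapped as a page... */
	rxb->dma = dma_map_page(rx_queue->dev, page, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);

	/* ...so it must be unmapped as a page, by the matching call */
	dma_unmap_page(rx_queue->dev, rxb->dma,
		       PAGE_SIZE, DMA_FROM_DEVICE);
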
+ 1 - 1
drivers/net/ethernet/mellanox/mlx4/catas.c

@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
 	return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
 	return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
 		(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;

+ 12 - 0
drivers/net/ethernet/mellanox/mlx4/intf.c

@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
 		return;
 
 	mlx4_stop_catas_poll(dev);
+	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+	    mlx4_is_slave(dev)) {
+		/* In mlx4_remove_one on a VF */
+		u32 slave_read =
+			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+		if (mlx4_comm_internal_err(slave_read)) {
+			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+				 __func__);
+			mlx4_enter_error_state(dev->persist);
+		}
+	}
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)

+ 1 - 0
drivers/net/ethernet/mellanox/mlx4/mlx4.h

@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
 		    enum mlx4_port_type *type);

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	if (cmd->cmdif_rev > CMD_IF_REV) {
 		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
 			CMD_IF_REV, cmd->cmdif_rev);
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		goto err_free_page;
 	}
 

+ 4 - 3
drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);

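The -ENOTSUPP to -EOPNOTSUPP conversions in this and the following mlx5 files are more than cosmetic: ENOTSUPP (524) is a kernel-internal value with no userspace definition, so an ethtool or ioctl path that leaks it surfaces to applications as "Unknown error 524". EOPNOTSUPP is the errno userspace actually understands:

	/* Kernel-internal; userspace perror() prints "Unknown error 524" */
	return -ENOTSUPP;

	/* Proper userspace-visible errno: "Operation not supported" */
	return -EOPNOTSUPP;
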
+ 8 - 3
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c

@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
 	for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	int err;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_dbcnl_validate_ets(netdev, ets);
 	if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct ieee_ets ets;
 	struct ieee_pfc pfc;
-	int err = -ENOTSUPP;
+	int err = -EOPNOTSUPP;
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 
+	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+		netdev_err(netdev, "%s, ets is not supported\n", __func__);
+		return;
+	}
+
 	if (priority >= CEE_DCBX_MAX_PRIO) {
 		netdev_err(netdev,
 			   "%s, priority is out of range\n", __func__);

+ 26 - 15
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c

@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&priv->state_lock);
 
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
 	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-	int i;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+	int tt;
 
 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
 
-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+	}
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
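The mlx5e_modify_tirs_hash() rewrite above fixes a subtle reuse bug: the single `in` buffer is reused for every indirection TIR, so each iteration must wipe the TIR context and rebuild it for that traffic type, otherwise field selections from the previous iteration leak into the next. A toy userspace model of the fixed loop (stand-in names, not driver code):

/* Toy model, not kernel code: rebuild a shared context buffer from scratch
 * on every iteration instead of letting stale fields carry over. */
#include <stdio.h>
#include <string.h>

#define NUM_INDIR_TIRS 4

static void build_hash_ctx(char *ctx, size_t len, int tt)
{
	snprintf(ctx, len, "hash fields for traffic type %d", tt);
}

int main(void)
{
	char tirc[64];
	int tt;

	for (tt = 0; tt < NUM_INDIR_TIRS; tt++) {
		memset(tirc, 0, sizeof(tirc));  /* mirrors memset(tirc, 0, ctxlen) */
		build_hash_ctx(tirc, sizeof(tirc), tt);
		printf("modify tir[%d]: %s\n", tt, tirc);
	}
	return 0;
}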
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	bool hash_changed = false;
 	void *in;
 
 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
 	}
 
-	if (key)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    hfunc != priv->params.rss_hfunc) {
+		priv->params.rss_hfunc = hfunc;
+		hash_changed = true;
+	}
+
+	if (key) {
 		memcpy(priv->params.toeplitz_hash_key, key,
 		       sizeof(priv->params.toeplitz_hash_key));
+		hash_changed = hash_changed ||
+			       priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+	}
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-		priv->params.rss_hfunc = hfunc;
-
-	mlx5e_modify_tirs_hash(priv, in, inlen);
+	if (hash_changed)
+		mlx5e_modify_tirs_hash(priv, in, inlen);
 
 	mutex_unlock(&priv->state_lock);
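The reworked mlx5e_set_rxfh() above only reprograms the TIRs when something that affects hashing actually changed: either the hash function itself, or the key while Toeplitz (ETH_RSS_HASH_TOP) is the effective function, since the XOR hash ignores the key. A compilable sketch of that decision, with stand-in constants (an assumption-labeled mirror of the patch's logic, not the driver's code):

/* Sketch of the update predicate; constants and names are stand-ins. */
#include <assert.h>
#include <stdbool.h>

enum hfunc { HASH_NO_CHANGE, HASH_XOR, HASH_TOP };

static bool needs_tir_update(enum hfunc cur, enum hfunc req, bool new_key)
{
	bool changed = false;

	if (req != HASH_NO_CHANGE && req != cur) {
		cur = req;              /* the stored hfunc is updated first */
		changed = true;
	}
	if (new_key)
		changed = changed || cur == HASH_TOP;
	return changed;
}

int main(void)
{
	assert(!needs_tir_update(HASH_XOR, HASH_NO_CHANGE, true)); /* XOR ignores key */
	assert(needs_tir_update(HASH_TOP, HASH_NO_CHANGE, true));  /* new Toeplitz key */
	assert(needs_tir_update(HASH_TOP, HASH_XOR, false));       /* hfunc changed */
	return 0;
}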
 
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	u32 mlx5_wol_mode;
 
 	if (!wol_supported)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (wol->wolopts & ~wol_supported)
 		return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 
 	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
 	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (!rx_mode_changed)
 		return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 	bool reset;
 
 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
 		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c

@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 					       MLX5_FLOW_NAMESPACE_KERNEL);
 
 	if (!priv->fs.ns)
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_arfs_create_tables(priv);
 	if (err) {

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c

@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 	ns = mlx5_get_flow_namespace(priv->mdev,
 				     MLX5_FLOW_NAMESPACE_ETHTOOL);
 	if (!ns)
-		return ERR_PTR(-ENOTSUPP);
+		return ERR_PTR(-EOPNOTSUPP);
 
 	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
 						       flow_table_properties_nic_receive.log_max_ft_size)),

+ 101 - 101
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
 	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt)
 {
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
 	MLX5_SET(tirc, tirc, rx_hash_fn,
 		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
 	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
 		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
 	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	default:
+		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+	}
 }
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
 				      enum mlx5e_traffic_types tt)
 {
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
 	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
 	mlx5e_build_tir_ctx_lro(tirc, priv);
 
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
-
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	default:
-		WARN_ONCE(true,
-			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-	}
+	mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
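With the per-traffic-type switch moved into mlx5e_build_indir_tir_ctx_hash(), TIR creation above and the ethtool RSS path in en_ethtool.c now go through a single builder, so the create-time and modify-time hash configurations cannot drift apart. A trivial compilable toy of the one-builder/two-callers shape (userspace stand-ins, not the driver's API):

/* Toy model, not kernel code: both paths delegate to one builder. */
#include <stdio.h>

static void build_hash_fields(char *ctx, size_t len, int tt)
{
	snprintf(ctx, len, "l3/l4 selectors for tt=%d", tt);
}

static void create_tir(int tt)
{
	char ctx[64];
	build_hash_fields(ctx, sizeof(ctx), tt);  /* create path */
	printf("create: %s\n", ctx);
}

static void modify_tir(int tt)
{
	char ctx[64];
	build_hash_fields(ctx, sizeof(ctx), tt);  /* ethtool path */
	printf("modify: %s\n", ctx);
}

int main(void)
{
	create_tir(0);
	modify_tir(0);
	return 0;
}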
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
 	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
 	    !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 			       < 3) {
 		mlx5_core_warn(mdev,
 			       "Not creating net device, some required device capabilities are missing\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");

+ 6 - 7
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   __be32 *saddr,
 				   int *out_ttl)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
 	int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
-
-	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-		ip_rt_put(rt);
-		return -EOPNOTSUPP;
-	}
+	/* if the egress device isn't on the same HW e-switch, we use the uplink */
+	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	else
+		*out_dev = rt->dst.dev;
 
 	ttl = ip4_dst_hoplimit(&rt->dst);
 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 	*out_n = n;
 	*saddr = fl4->saddr;
 	*out_ttl = ttl;
-	*out_dev = rt->dst.dev;
 
 	return 0;
 }
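The en_tc.c hunk changes policy rather than just plumbing: instead of refusing to offload an encapsulated flow whose route egresses a device outside the HW e-switch, the driver now falls back to the e-switch uplink as the egress port. A toy model of the device-selection step (stand-in types; mlx5_eswitch_get_uplink_netdev() is the helper the patch actually calls):

/* Toy model, not kernel code: pick the route's egress device when it sits on
 * the same HW e-switch, otherwise fall back to the uplink. */
#include <stdbool.h>
#include <stdio.h>

struct toy_netdev { const char *name; int eswitch_id; };

static struct toy_netdev *select_out_dev(struct toy_netdev *ours,
					 struct toy_netdev *route_dev,
					 struct toy_netdev *uplink)
{
	bool same_esw = ours->eswitch_id == route_dev->eswitch_id;

	return same_esw ? route_dev : uplink;
}

int main(void)
{
	struct toy_netdev vf = { "vf0", 1 }, peer = { "vf1", 1 };
	struct toy_netdev ext = { "eth9", 2 }, uplink = { "uplink", 1 };

	printf("%s\n", select_out_dev(&vf, &peer, &uplink)->name); /* vf1 */
	printf("%s\n", select_out_dev(&vf, &ext, &uplink)->name);  /* uplink */
	return 0;
}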

+ 5 - 5
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 
 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
 		  vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
 		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 
 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))

+ 22 - 14
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
 	return 0;
 
 out_notsupp:
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -EOPNOTSUPP;
 		goto ns_err;
 	}
 
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
 	if (!ns) {
 		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
 	}
 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
 		if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	int vport;
 	int err;
 
+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	err = esw_create_offloads_fdb_table(esw, nvports);
 	if (err)
-		return err;
+		goto create_fdb_err;
 
 	err = esw_create_offloads_table(esw);
 	if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 			goto err_reps;
 	}
 
-	/* disable PF RoCE so missed packets don't go through RoCE steering */
-	mlx5_dev_list_lock();
-	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	return 0;
 
 err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
 
 create_ft_err:
 	esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
 
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-	/* enable back PF RoCE */
-	mlx5_dev_list_lock();
-	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
 	}
 
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
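The esw_offloads_init()/esw_offloads_stop() reshuffle is about ordering and error-path symmetry: PF RoCE is now disabled before any offloads tables exist (so missed packets can never hit RoCE steering), every failure path, including the new create_fdb_err label, re-enables it, and esw_offloads_stop() re-enables RoCE only after the switch back to legacy mode. A compilable toy of the acquire-early/undo-on-every-exit pattern (stand-in functions, not the driver's API):

/* Toy model of the init unwinding: the first action (disable RoCE) is undone
 * on every failure path via a dedicated label, keeping setup and teardown
 * symmetric. */
#include <stdio.h>

static int create_fdb(int fail) { return fail ? -1 : 0; }

static int offloads_init(int fail)
{
	int err;

	printf("disable RoCE\n");          /* first, before any tables exist */

	err = create_fdb(fail);
	if (err)
		goto create_fdb_err;

	return 0;

create_fdb_err:
	printf("re-enable RoCE\n");        /* undo the first step on failure */
	return err;
}

int main(void)
{
	offloads_init(0);
	offloads_init(1);
	return 0;
}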
 

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c

@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
 						flow_table_properties_nic_receive.
 						flow_modify_en);
 	if (!atomic_mod_cap)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	opmod = 1;
 
 	return	mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);

Too many files changed in this diff, so some files are not shown.