
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

All merge conflicts were simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 8 years ago
parent commit e2160156bf
100 changed files with 1131 additions and 557 deletions
  1. Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt (+3 -0)
  2. MAINTAINERS (+0 -1)
  3. Makefile (+2 -2)
  4. arch/arc/include/asm/delay.h (+3 -1)
  5. arch/arc/kernel/head.S (+7 -7)
  6. arch/arc/kernel/mcip.c (+23 -32)
  7. arch/arc/kernel/smp.c (+20 -5)
  8. arch/arc/kernel/unaligned.c (+2 -1)
  9. arch/arm64/crypto/aes-modes.S (+42 -46)
  10. arch/arm64/kernel/topology.c (+7 -1)
  11. arch/parisc/include/asm/bitops.h (+7 -1)
  12. arch/parisc/include/uapi/asm/bitsperlong.h (+0 -2)
  13. arch/parisc/include/uapi/asm/swab.h (+3 -2)
  14. arch/sparc/include/asm/mmu_context_64.h (+4 -4)
  15. arch/sparc/kernel/irq_64.c (+1 -1)
  16. arch/sparc/kernel/sstate.c (+3 -3)
  17. arch/sparc/kernel/traps_64.c (+73 -0)
  18. crypto/algapi.c (+1 -0)
  19. drivers/ata/libata-core.c (+4 -2)
  20. drivers/ata/sata_mv.c (+3 -0)
  21. drivers/bcma/bcma_private.h (+3 -0)
  22. drivers/bcma/driver_chipcommon.c (+3 -8)
  23. drivers/bcma/driver_mips.c (+3 -0)
  24. drivers/dma/cppi41.c (+46 -23)
  25. drivers/dma/pl330.c (+6 -13)
  26. drivers/gpu/drm/i915/intel_display.c (+3 -0)
  27. drivers/hid/hid-cp2112.c (+12 -16)
  28. drivers/hid/hid-ids.h (+3 -0)
  29. drivers/hid/hid-lg.c (+1 -1)
  30. drivers/hid/usbhid/hid-quirks.c (+1 -0)
  31. drivers/hid/wacom_wac.c (+15 -13)
  32. drivers/i2c/busses/i2c-cadence.c (+4 -4)
  33. drivers/i2c/busses/i2c-imx-lpi2c.c (+20 -0)
  34. drivers/input/rmi4/rmi_driver.c (+2 -2)
  35. drivers/input/touchscreen/wm97xx-core.c (+1 -1)
  36. drivers/md/md.c (+5 -0)
  37. drivers/md/raid5-cache.c (+88 -18)
  38. drivers/md/raid5.c (+94 -27)
  39. drivers/md/raid5.h (+7 -0)
  40. drivers/net/ethernet/adaptec/starfire.c (+43 -2)
  41. drivers/net/ethernet/cadence/macb.c (+130 -58)
  42. drivers/net/ethernet/cadence/macb.h (+16 -3)
  43. drivers/net/ethernet/cavium/thunder/thunder_xcv.c (+1 -2)
  44. drivers/net/ethernet/emulex/benet/be_main.c (+28 -5)
  45. drivers/net/ethernet/freescale/gianfar.c (+2 -2)
  46. drivers/net/ethernet/mellanox/mlx4/catas.c (+1 -1)
  47. drivers/net/ethernet/mellanox/mlx4/intf.c (+12 -0)
  48. drivers/net/ethernet/mellanox/mlx4/mlx4.h (+1 -0)
  49. drivers/net/ethernet/mellanox/mlx5/core/cmd.c (+1 -1)
  50. drivers/net/ethernet/mellanox/mlx5/core/en.h (+4 -3)
  51. drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c (+8 -3)
  52. drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c (+26 -15)
  53. drivers/net/ethernet/mellanox/mlx5/core/en_fs.c (+1 -1)
  54. drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c (+1 -1)
  55. drivers/net/ethernet/mellanox/mlx5/core/en_main.c (+101 -101)
  56. drivers/net/ethernet/mellanox/mlx5/core/en_tc.c (+6 -8)
  57. drivers/net/ethernet/mellanox/mlx5/core/eswitch.c (+5 -5)
  58. drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c (+22 -14)
  59. drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c (+1 -1)
  60. drivers/net/ethernet/mellanox/mlx5/core/fs_core.c (+1 -1)
  61. drivers/net/ethernet/mellanox/mlx5/core/main.c (+1 -1)
  62. drivers/net/ethernet/mellanox/mlx5/core/port.c (+2 -2)
  63. drivers/net/ethernet/mellanox/mlx5/core/vport.c (+1 -1)
  64. drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c (+4 -0)
  65. drivers/net/phy/micrel.c (+1 -1)
  66. drivers/net/wireless/intel/iwlwifi/iwl-8000.c (+1 -1)
  67. drivers/net/wireless/intel/iwlwifi/mvm/sta.c (+4 -3)
  68. drivers/net/wireless/intel/iwlwifi/mvm/tt.c (+8 -4)
  69. drivers/parport/parport_gsc.c (+4 -4)
  70. drivers/pinctrl/berlin/berlin-bg4ct.c (+1 -1)
  71. drivers/pinctrl/intel/pinctrl-baytrail.c (+20 -7)
  72. drivers/pinctrl/intel/pinctrl-merrifield.c (+3 -0)
  73. drivers/pinctrl/sunxi/pinctrl-sunxi.c (+1 -2)
  74. drivers/rtc/Kconfig (+4 -1)
  75. drivers/rtc/rtc-jz4740.c (+10 -2)
  76. fs/cifs/readdir.c (+1 -0)
  77. fs/fscache/cookie.c (+5 -0)
  78. fs/fscache/netfs.c (+1 -0)
  79. fs/fscache/object.c (+30 -2)
  80. fs/nfs/nfs4proc.c (+3 -1)
  81. fs/nfs/nfs4state.c (+1 -0)
  82. fs/nfs/pnfs.c (+1 -1)
  83. include/linux/can/core.h (+3 -4)
  84. include/linux/fscache-cache.h (+1 -0)
  85. include/linux/netdevice.h (+15 -14)
  86. include/linux/nfs4.h (+2 -1)
  87. include/linux/percpu-refcount.h (+2 -2)
  88. include/linux/sunrpc/clnt.h (+1 -0)
  89. include/net/ipv6.h (+5 -0)
  90. include/soc/arc/mcip.h (+8 -8)
  91. include/uapi/linux/ethtool.h (+3 -1)
  92. kernel/cgroup.c (+5 -8)
  93. kernel/trace/trace_hwlat.c (+5 -3)
  94. net/can/af_can.c (+10 -2)
  95. net/can/af_can.h (+2 -1)
  96. net/can/bcm.c (+18 -9)
  97. net/can/gw.c (+1 -1)
  98. net/can/raw.c (+2 -2)
  99. net/ipv4/tcp_output.c (+4 -2)
  100. net/ipv6/ip6_output.c (+1 -1)

+ 3 - 0
Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt

@@ -15,6 +15,9 @@ Properties:
   Second cell specifies the irq distribution mode to cores
      0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3

+  The second cell in interrupts property is deprecated and may be ignored by
+  the kernel.
+
   intc accessed via the special ARC AUX register interface, hence "reg" property
   is not specified.


+ 0 - 1
MAINTAINERS

@@ -10208,7 +10208,6 @@ F:	drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:	QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
-L:	ath9k-devel@lists.ath9k.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 S:	Supported
 F:	drivers/net/wireless/ath/ath9k/

+ 2 - 2
Makefile

@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Anniversary Edition
+EXTRAVERSION = -rc6
+NAME = Fearless Coyote

 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"

+ 3 - 1
arch/arc/include/asm/delay.h

@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
 	"	lp  1f			\n"
 	"	lp  1f			\n"
 	"	nop			\n"
 	"	nop			\n"
 	"1:				\n"
 	"1:				\n"
-	: : "r"(loops));
+	:
+        : "r"(loops)
+        : "lp_count");
 }
 }
 
 
 extern void __bad_udelay(void);
 extern void __bad_udelay(void);
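
A side note on this hunk: the ARC "lp" instruction consumes the dedicated LP_COUNT register, so the asm must declare it clobbered or the compiler may keep a live value there across the loop. The same rule, sketched for an x86-64 host with GCC/Clang inline asm (an illustration, not the kernel's code):

#include <stdio.h>

/*
 * The asm body destroys %rcx, so "rcx" is listed as clobbered; omitting
 * it invites the same miscompilation the ARC patch fixes for LP_COUNT.
 * Assumes loops > 0.
 */
static void spin_loops(unsigned long loops)
{
	__asm__ __volatile__(
		"	mov %0, %%rcx	\n"
		"1:	dec %%rcx	\n"
		"	jnz 1b		\n"
		:			/* no outputs */
		: "r"(loops)		/* loop count, any register */
		: "rcx", "cc");		/* %rcx and the flags are trashed */
}

int main(void)
{
	spin_loops(1000);
	puts("done");
	return 0;
}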

+ 7 - 7
arch/arc/kernel/head.S

@@ -71,14 +71,14 @@ ENTRY(stext)
 	GET_CPU_ID  r5
 	cmp	r5, 0
 	mov.nz	r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-	; Non-Master can proceed as system would be booted sufficiently
-	jnz	first_lines_of_secondary
-#else
+	bz	.Lmaster_proceed
+
 	; Non-Masters wait for Master to boot enough and bring them up
-	jnz	arc_platform_smp_wait_to_boot
-#endif
-	; Master falls thru
+	; when they resume, tail-call to entry point
+	mov	blink, @first_lines_of_secondary
+	j	arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif

 	; Clear BSS before updating any globals

+ 23 - 32
arch/arc/kernel/mcip.c

@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)
 	READ_BCR(ARC_REG_MCIP_BCR, mp);

 	sprintf(smp_cpuinfo_buf,
-		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
 		mp.ver, mp.num_cores,
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
-		IS_AVAIL1(mp.llm, "LLM "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.gfrc, "GFRC"));

@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }

-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,

 	return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+	/*
+	 * By default send all common interrupts to all available online CPUs.
+	 * The affinity of common interrupts in IDU must be set manually since
+	 * in some cases the kernel will not call irq_set_affinity() by itself:
+	 *   1. When the kernel is not configured with SMP support.
+	 *   2. When the kernel is configured with SMP support but the upper
+	 *      interrupt controller does not support setting the affinity
+	 *      and cannot propagate it to the IDU.
+	 */
+	idu_irq_set_affinity(data, cpu_online_mask, false);
+	idu_irq_unmask(data);
+}

 static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_enable		= idu_irq_enable,
 #ifdef CONFIG_SMP
 	.irq_set_affinity       = idu_irq_set_affinity,
 #endif
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
 			 const u32 *intspec, unsigned int intsize,
 			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-	int distri = intspec[1];
-	unsigned long flags;
-
+	/*
+	 * Ignore value of interrupt distribution mode for common interrupts in
+	 * IDU which resides in intspec[1] since setting an affinity using value
+	 * from Device Tree is deprecated in ARC.
+	 */
+	*out_hwirq = intspec[0];
 	*out_type = IRQ_TYPE_NONE;

-	/* XXX: validate distribution scheme again online cpu mask */
-	if (distri == 0) {
-		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	} else {
-		/*
-		 * DEST based distribution for Level Triggered intr can only
-		 * have 1 CPU, so generalize it to always contain 1 cpu
-		 */
-		int cpu = ffs(distri);
-
-		if (cpu != fls(distri))
-			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-				hwirq, cpu);
-
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, cpu);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	}
-
 	return 0;
 }


+ 20 - 5
arch/arc/kernel/smp.c

@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;

+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)		f
+#define __boot_write(f, v)	f = v
+
+#else
+
+#define __boot_read(f)		arc_read_uncached_32(&f)
+#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
 	BUG_ON(cpu == 0);
-	wake_flag = cpu;
+
+	__boot_write(wake_flag, cpu);
 }

 void arc_platform_smp_wait_to_boot(int cpu)
 {
-	while (wake_flag != cpu)
+	/* for halt-on-reset, we've waited already */
+	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+		return;
+
+	while (__boot_read(wake_flag) != cpu)
 		;

-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
+	__boot_write(wake_flag, 0);
 }

-
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";

+ 2 - 1
arch/arc/kernel/unaligned.c

@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 	if (state.fault)
 		goto fault;

+	/* clear any remnants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;

+ 42 - 46
arch/arm64/crypto/aes-modes.S

@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 	cbz		w6, .Lcbcencloop

 	ld1		{v0.16b}, [x5]			/* get iv */
-	enc_prepare	w3, x2, x5
+	enc_prepare	w3, x2, x6

 .Lcbcencloop:
 	ld1		{v1.16b}, [x1], #16		/* get next pt block */
 	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
-	encrypt_block	v0, w3, x2, x5, w6
+	encrypt_block	v0, w3, x2, x6, w7
 	st1		{v0.16b}, [x0], #16
 	subs		w4, w4, #1
 	bne		.Lcbcencloop
+	st1		{v0.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_encrypt)

@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	cbz		w6, .LcbcdecloopNx

 	ld1		{v7.16b}, [x5]			/* get iv */
-	dec_prepare	w3, x2, x5
+	dec_prepare	w3, x2, x6

 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 	ld1		{v1.16b}, [x1], #16		/* get next ct block */
 	mov		v0.16b, v1.16b			/* ...and copy to v0 */
-	decrypt_block	v0, w3, x2, x5, w6
+	decrypt_block	v0, w3, x2, x6, w7
 	eor		v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
 	mov		v7.16b, v1.16b			/* ct is next iv */
 	st1		{v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	bne		.Lcbcdecloop
 .Lcbcdecout:
 	FRAME_POP
+	st1		{v7.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_decrypt)

@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)

 AES_ENTRY(aes_ctr_encrypt)
 	FRAME_PUSH
-	cbnz		w6, .Lctrfirst		/* 1st time around? */
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
-#if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
-	bcs		.Lctrinc
-	add		x5, x5, #1		/* increment BE ctr */
-	b		.LctrincNx
-#else
-	b		.Lctrinc
-#endif
-.Lctrfirst:
+	cbz		w6, .Lctrnotfirst	/* 1st time around? */
 	enc_prepare	w3, x2, x6
 	ld1		{v4.16b}, [x5]
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
+
+.Lctrnotfirst:
+	umov		x8, v4.d[1]		/* keep swabbed ctr in reg */
+	rev		x8, x8
 #if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
+	cmn		w8, w4			/* 32 bit overflow? */
 	bcs		.Lctrloop
 .LctrloopNx:
 	subs		w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
 	mov		v0.8b, v4.8b
 	mov		v1.8b, v4.8b
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v0.d[1], x7
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v1.d[1], x7
 	ld1		{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
 	do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
 	st1		{v0.16b-v1.16b}, [x0], #32
 #else
 	ldr		q8, =0x30000000200000001	/* addends 1,2,3[,0] */
-	dup		v7.4s, w5
+	dup		v7.4s, w8
 	mov		v0.16b, v4.16b
 	add		v7.4s, v7.4s, v8.4s
 	mov		v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
 	eor		v2.16b, v7.16b, v2.16b
 	eor		v3.16b, v5.16b, v3.16b
 	st1		{v0.16b-v3.16b}, [x0], #64
-	add		x5, x5, #INTERLEAVE
+	add		x8, x8, #INTERLEAVE
 #endif
-	cbz		w4, .LctroutNx
-.LctrincNx:
-	rev		x7, x5
+	rev		x7, x8
 	ins		v4.d[1], x7
+	cbz		w4, .Lctrout
 	b		.LctrloopNx
-.LctroutNx:
-	sub		x5, x5, #1
-	rev		x7, x5
-	ins		v4.d[1], x7
-	b		.Lctrout
 .Lctr1x:
 	adds		w4, w4, #INTERLEAVE
 	beq		.Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
 	mov		v0.16b, v4.16b
 	encrypt_block	v0, w3, x2, x6, w7
+
+	adds		x8, x8, #1		/* increment BE ctr */
+	rev		x7, x8
+	ins		v4.d[1], x7
+	bcs		.Lctrcarry		/* overflow? */
+
+.Lctrcarrydone:
 	subs		w4, w4, #1
 	bmi		.Lctrhalfblock		/* blocks < 0 means 1/2 block */
 	ld1		{v3.16b}, [x1], #16
 	eor		v3.16b, v0.16b, v3.16b
 	st1		{v3.16b}, [x0], #16
-	beq		.Lctrout
-.Lctrinc:
-	adds		x5, x5, #1		/* increment BE ctr */
-	rev		x7, x5
-	ins		v4.d[1], x7
-	bcc		.Lctrloop		/* no overflow? */
-	umov		x7, v4.d[0]		/* load upper word of ctr  */
-	rev		x7, x7			/* ... to handle the carry */
-	add		x7, x7, #1
-	rev		x7, x7
-	ins		v4.d[0], x7
-	b		.Lctrloop
+	bne		.Lctrloop
+
+.Lctrout:
+	st1		{v4.16b}, [x5]		/* return next CTR value */
+	FRAME_POP
+	ret
+
 .Lctrhalfblock:
 	ld1		{v3.8b}, [x1]
 	eor		v3.8b, v0.8b, v3.8b
 	st1		{v3.8b}, [x0]
-.Lctrout:
 	FRAME_POP
 	ret
+
+.Lctrcarry:
+	umov		x7, v4.d[0]		/* load upper word of ctr  */
+	rev		x7, x7			/* ... to handle the carry */
+	add		x7, x7, #1
+	rev		x7, x7
+	ins		v4.d[0], x7
+	b		.Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
 	.ltorg

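For readers following the counter rework above, a C model (assuming a little-endian host with the GCC/Clang __builtin_bswap64 builtin; this is not the kernel's code) of what the new .Lctrcarry path does. The 128-bit CTR block is big-endian; the low half is kept byte-swapped in a register and incremented per block, and only on 64-bit wraparound does the carry propagate into the upper half:

#include <stdint.h>
#include <stdio.h>

static void ctr128_inc(uint64_t ctr[2])	/* ctr[0] = high, ctr[1] = low, both stored big-endian */
{
	uint64_t lo = __builtin_bswap64(ctr[1]) + 1;	/* "keep swabbed ctr in reg" */

	ctr[1] = __builtin_bswap64(lo);
	if (lo == 0)	/* overflow: the .Lctrcarry analogue, bump the upper word */
		ctr[0] = __builtin_bswap64(__builtin_bswap64(ctr[0]) + 1);
}

int main(void)
{
	uint64_t ctr[2] = { 0, __builtin_bswap64(UINT64_MAX) };	/* low half all-ones */

	ctr128_inc(ctr);	/* wraps: high half becomes 1, low half 0 */
	printf("%016llx %016llx\n",
	       (unsigned long long)__builtin_bswap64(ctr[0]),
	       (unsigned long long)__builtin_bswap64(ctr[1]));
	return 0;
}
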

+ 7 - 1
arch/arm64/kernel/topology.c

@@ -11,6 +11,7 @@
  * for more details.
  */

+#include <linux/acpi.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {

 static int __init register_cpufreq_notifier(void)
 {
-	if (cap_parsing_failed)
+	/*
+	 * on ACPI-based systems we need to use the default cpu capacity
+	 * until we have the necessary code to parse the cpu capacity, so
+	 * skip registering cpufreq notifier.
+	 */
+	if (!acpi_disabled || cap_parsing_failed)
 		return -EINVAL;

 	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {

+ 7 - 1
arch/parisc/include/asm/bitops.h

@@ -6,7 +6,7 @@
 #endif

 #include <linux/compiler.h>
-#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
@@ -17,6 +17,12 @@
  * to include/asm-i386/bitops.h or kerneldoc
  */

+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))

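Why the constant pair above is 6/5: SHIFT_PER_LONG is log2(BITS_PER_LONG), letting a bit number split into a word index and a bit-within-word offset with a shift and a mask instead of a division. A minimal standalone sketch (the MY_-prefixed names are illustrative stand-ins, not the header's macros):

#include <stdio.h>

#define MY_BITS_PER_LONG	((int)(sizeof(long) * 8))
#define MY_SHIFT_PER_LONG	(MY_BITS_PER_LONG == 64 ? 6 : 5)
#define MY_CHOP_SHIFTCOUNT(x)	((unsigned long)(x) & (MY_BITS_PER_LONG - 1))

static int test_bit_example(int nr, const unsigned long *addr)
{
	unsigned long word = addr[nr >> MY_SHIFT_PER_LONG];	/* which long holds bit nr */

	return (word >> MY_CHOP_SHIFTCOUNT(nr)) & 1;		/* which bit inside it */
}

int main(void)
{
	unsigned long bits[2] = { 0, 1UL << 3 };	/* sets bit BITS_PER_LONG + 3 */

	printf("%d\n", test_bit_example(MY_BITS_PER_LONG + 3, bits));	/* prints 1 */
	return 0;
}
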

+ 0 - 2
arch/parisc/include/uapi/asm/bitsperlong.h

@@ -3,10 +3,8 @@

 #if defined(__LP64__)
 #define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
 #else
 #define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
 #endif

 #include <asm-generic/bitsperlong.h>

+ 3 - 2
arch/parisc/include/uapi/asm/swab.h

@@ -1,6 +1,7 @@
 #ifndef _PARISC_SWAB_H
 #define _PARISC_SWAB_H

+#include <asm/bitsperlong.h>
 #include <linux/types.h>
 #include <linux/compiler.h>

@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 }
 #define __arch_swab32 __arch_swab32

-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
 /*
 ** From "PA-RISC 2.0 Architecture", HP Professional Books.
 ** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 	return x;
 }
 #define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */

 #endif /* _PARISC_SWAB_H */

+ 4 - 4
arch/sparc/include/asm/mmu_context_64.h

@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd),
-			     &mm->context.tsb_block[0],
+			     &mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-			     (mm->context.tsb_block[1].tsb ?
-			      &mm->context.tsb_block[1] :
+			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+			      &mm->context.tsb_block[MM_TSB_HUGE] :
 			      NULL)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }

 void tsb_grow(struct mm_struct *mm,

+ 1 - 1
arch/sparc/kernel/irq_64.c

@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 	unsigned long order = get_order(size);
 	unsigned long p;

-	p = __get_free_pages(GFP_KERNEL, order);
+	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate queue.\n");
 		prom_halt();

+ 3 - 3
arch/sparc/kernel/sstate.c

@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
 	"Linux powering off";
 	"Linux powering off";
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 	"Linux rebooting";
 	"Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
-	"Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+	"Linux panicking";
 
 
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 {
 {
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
 
 
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
 {
-	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
 
 
 	return NOTIFY_DONE;
 	return NOTIFY_DONE;
 }
 }

+ 73 - 0
arch/sparc/kernel/traps_64.c

@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
 	atomic_inc(&sun4v_resum_oflow_cnt);
 }

+/* Given a set of registers, get the virtual address that was being accessed
+ * by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+	unsigned int insn;
+
+	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+		return compute_effective_address(regs, insn,
+						 (insn >> 25) & 0x1f);
+	}
+	return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+				  struct sun4v_error_entry *ent) {
+
+	unsigned int attrs = ent->err_attrs;
+
+	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+		unsigned long addr = ent->err_raddr;
+		siginfo_t info;
+
+		if (addr == ~(u64)0) {
+			/* This seems highly unlikely to ever occur */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+		} else {
+			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+							      PAGE_SIZE);
+
+			/* Break the unfortunate news. */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+				 addr);
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR:   Claiming %lu ages.\n",
+				 page_cnt);
+
+			while (page_cnt-- > 0) {
+				if (pfn_valid(addr >> PAGE_SHIFT))
+					get_page(pfn_to_page(addr >> PAGE_SHIFT));
+				addr += PAGE_SIZE;
+			}
+		}
+		info.si_signo = SIGKILL;
+		info.si_errno = 0;
+		info.si_trapno = 0;
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+	if (attrs & SUN4V_ERR_ATTRS_PIO) {
+		siginfo_t info;
+
+		info.si_signo = SIGBUS;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+
+	/* Default to doing nothing */
+	return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)

 	put_cpu();

+	if (!(regs->tstate & TSTATE_PRIV) &&
+	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+		/* DON'T PANIC: This userspace error was handled. */
+		return;
+	}
+
 #ifdef CONFIG_PCI
 	/* Check for the special PCI poke sequence. */
 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {

+ 1 - 0
crypto/algapi.c

@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
 	struct crypto_larval *larval;
 	int err;

+	alg->cra_flags &= ~CRYPTO_ALG_DEAD;
 	err = crypto_check_alg(alg);
 	if (err)
 		return err;

+ 4 - 2
drivers/ata/libata-core.c

@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,

 		if (qc->err_mask & ~AC_ERR_OTHER)
 			qc->err_mask &= ~AC_ERR_OTHER;
+	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+		qc->result_tf.command |= ATA_SENSE;
 	}

 	/* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },

 	/*
-	 * Device times out with higher max sects.
+	 * These devices time out with higher max sects.
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
 	 */
-	{ "LITEON CX1-JB256-HP", NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },

 	/* Devices we expect to fail diagnostics */


+ 3 - 0
drivers/ata/sata_mv.c

@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
 	host->iomap = NULL;
 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
 				   resource_size(res));
+	if (!hpriv->base)
+		return -ENOMEM;
+
 	hpriv->base -= SATAHC0_REG_BASE;

 	hpriv->clk = clk_get(&pdev->dev, NULL);

+ 3 - 0
drivers/bcma/bcma_private.h

@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */

 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);

+ 3 - 8
drivers/bcma/driver_chipcommon.c

@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>

-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
 					 u32 mask, u32 value)
 {
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
 	if (cc->capabilities & BCMA_CC_CAP_PMU)
 		bcma_pmu_early_init(cc);

-	if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-		bcma_chipco_serial_init(cc);
-
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		bcma_core_chipcommon_flash_detect(cc);

@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
 	return res;
 }

-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
 	unsigned int irq;
 	u32 baud_base;
 	u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 		ports[i].baud_base = baud_base;
 		ports[i].reg_shift = 0;
 	}
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */

+ 3 - 0
drivers/bcma/driver_mips.c

@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)

 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+	struct bcma_bus *bus = mcore->core->bus;
+
 	if (mcore->early_setup_done)
 		return;

+	bcma_chipco_serial_init(&bus->drv_cc);
 	bcma_core_mips_nvram_init(mcore);

 	mcore->early_setup_done = true;

+ 46 - 23
drivers/dma/cppi41.c

@@ -153,6 +153,8 @@ struct cppi41_dd {

 	/* context for suspend/resume */
 	unsigned int dma_tdfdq;
+
+	bool is_suspended;
 };

 #define FIST_COMPLETION_QUEUE	93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
 	BUG_ON(desc_num >= ALLOC_DECS_NUM);
 	c = cdd->chan_busy[desc_num];
 	cdd->chan_busy[desc_num] = NULL;
+
+	/* Usecount for chan_busy[], paired with push_desc_queue() */
+	pm_runtime_put(cdd->ddev.dev);
+
 	return c;
 }

@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)

 		while (val) {
 			u32 desc, len;
-			int error;

-			error = pm_runtime_get(cdd->ddev.dev);
-			if (error < 0)
-				dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-					__func__, error);
+			/*
+			 * This should never trigger, see the comments in
+			 * push_desc_queue()
+			 */
+			WARN_ON(cdd->is_suspended);

 			q_num = __fls(val);
 			val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 			c->residue = pd_trans_len(c->desc->pd6) - len;
 			dma_cookie_complete(&c->txd);
 			dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
-			pm_runtime_mark_last_busy(cdd->ddev.dev);
-			pm_runtime_put_autosuspend(cdd->ddev.dev);
 		}
 	}
 	return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
 	 */
 	__iowmb();

+	/*
+	 * DMA transfers can take at least 200ms to complete with USB mass
+	 * storage connected. To prevent autosuspend timeouts, we must use
+	 * pm_runtime_get/put() when chan_busy[] is modified. This will get
+	 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+	 * outcome of the transfer.
+	 */
+	pm_runtime_get(cdd->ddev.dev);
+
 	desc_phys = lower_32_bits(c->desc_phys);
 	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
 	WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
 	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
 }

-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * getting called out of order. We have both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
 {
-	struct cppi41_dd *cdd = c->cdd;
-	unsigned long flags;
+	struct cppi41_channel *c, *_c;

-	spin_lock_irqsave(&cdd->lock, flags);
-	list_add_tail(&c->node, &cdd->pending);
-	spin_unlock_irqrestore(&cdd->lock, flags);
+	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+		push_desc_queue(c);
+		list_del(&c->node);
+	}
 }

 static void cppi41_dma_issue_pending(struct dma_chan *chan)
 {
 	struct cppi41_channel *c = to_cpp41_chan(chan);
 	struct cppi41_dd *cdd = c->cdd;
+	unsigned long flags;
 	int error;

 	error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
 		return;
 	}

-	if (likely(pm_runtime_active(cdd->ddev.dev)))
-		push_desc_queue(c);
-	else
-		pending_desc(c);
+	spin_lock_irqsave(&cdd->lock, flags);
+	list_add_tail(&c->node, &cdd->pending);
+	if (!cdd->is_suspended)
+		cppi41_run_queue(cdd);
+	spin_unlock_irqrestore(&cdd->lock, flags);

 	pm_runtime_mark_last_busy(cdd->ddev.dev);
 	pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 	WARN_ON(!cdd->chan_busy[desc_num]);
 	cdd->chan_busy[desc_num] = NULL;

+	/* Usecount for chan_busy[], paired with push_desc_queue() */
+	pm_runtime_put(cdd->ddev.dev);
+
 	return 0;
 }

@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 {
 	struct cppi41_dd *cdd = dev_get_drvdata(dev);
+	unsigned long flags;

+	spin_lock_irqsave(&cdd->lock, flags);
+	cdd->is_suspended = true;
 	WARN_ON(!list_empty(&cdd->pending));
+	spin_unlock_irqrestore(&cdd->lock, flags);

 	return 0;
 }
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
 {
 	struct cppi41_dd *cdd = dev_get_drvdata(dev);
-	struct cppi41_channel *c, *_c;
 	unsigned long flags;

 	spin_lock_irqsave(&cdd->lock, flags);
-	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
-		push_desc_queue(c);
-		list_del(&c->node);
-	}
+	cdd->is_suspended = false;
+	cppi41_run_queue(cdd);
 	spin_unlock_irqrestore(&cdd->lock, flags);

 	return 0;
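
The pm_runtime churn in this file follows one invariant: the runtime-PM usecount must stay nonzero while any chan_busy[] slot holds a descriptor, so autosuspend cannot fire mid-transfer. A userspace model of just that pairing (pm_runtime_get/put reduced to a bare counter; illustrative, not driver code):

#include <assert.h>
#include <stddef.h>

#define NUM_SLOTS 4

static int pm_usecount;			/* stands in for the runtime-PM usecount */
static void *chan_busy[NUM_SLOTS];

static void push_desc(int slot, void *desc)	/* push_desc_queue() analogue */
{
	pm_usecount++;			/* paired with the put in complete_desc() */
	assert(chan_busy[slot] == NULL);
	chan_busy[slot] = desc;
}

static void *complete_desc(int slot)	/* desc_to_chan() / cppi41_stop_chan() analogue */
{
	void *desc = chan_busy[slot];

	chan_busy[slot] = NULL;
	pm_usecount--;			/* usecount for chan_busy[], see push_desc() */
	return desc;
}

int main(void)
{
	int d = 42;

	push_desc(0, &d);
	assert(pm_usecount > 0);	/* device held awake while a transfer is in flight */
	complete_desc(0);
	assert(pm_usecount == 0);	/* balanced: autosuspend may now run */
	return 0;
}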

+ 6 - 13
drivers/dma/pl330.c

@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 {
 	struct pl330_thread *thrd = NULL;
-	unsigned long flags;
 	int chans, i;

 	if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)

 	chans = pl330->pcfg.num_chan;

-	spin_lock_irqsave(&pl330->lock, flags);
-
 	for (i = 0; i < chans; i++) {
 		thrd = &pl330->channels[i];
 		if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 		thrd = NULL;
 	}

-	spin_unlock_irqrestore(&pl330->lock, flags);
-
 	return thrd;
 }

@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
 static void pl330_release_channel(struct pl330_thread *thrd)
 {
 	struct pl330_dmac *pl330;
-	unsigned long flags;

 	if (!thrd || thrd->free)
 		return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)

 	pl330 = thrd->dmac;

-	spin_lock_irqsave(&pl330->lock, flags);
 	_free_event(thrd, thrd->ev);
 	thrd->free = true;
-	spin_unlock_irqrestore(&pl330->lock, flags);
 }

 /* Initialize the structure for PL330 configuration, that can be used
@@ -2122,20 +2114,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;

-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);

 	dma_cookie_init(chan);
 	pch->cyclic = false;

 	pch->thread = pl330_request_channel(pl330);
 	if (!pch->thread) {
-		spin_unlock_irqrestore(&pch->lock, flags);
+		spin_unlock_irqrestore(&pl330->lock, flags);
 		return -ENOMEM;
 	}

 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);

 	return 1;
 }
@@ -2238,12 +2230,13 @@ static int pl330_pause(struct dma_chan *chan)
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;

 	tasklet_kill(&pch->task);

 	pm_runtime_get_sync(pch->dmac->ddma.dev);
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);

 	pl330_release_channel(pch->thread);
 	pch->thread = NULL;
@@ -2251,7 +2244,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	if (pch->cyclic)
 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }

+ 3 - 0
drivers/gpu/drm/i915/intel_display.c

@@ -2251,6 +2251,9 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 	intel_fill_fb_ggtt_view(&view, fb, rotation);
 	vma = i915_gem_object_to_ggtt(obj, &view);

+	if (WARN_ON_ONCE(!vma))
+		return;
+
 	i915_vma_unpin_fence(vma);
 	i915_gem_object_unpin_from_display_plane(vma);
 }

+ 12 - 16
drivers/hid/hid-cp2112.c

@@ -168,7 +168,7 @@ struct cp2112_device {
 	atomic_t xfer_avail;
 	struct gpio_chip gc;
 	u8 *in_out_buffer;
-	spinlock_t lock;
+	struct mutex lock;

 	struct gpio_desc *desc[8];
 	bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;

-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);

 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
 				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	ret = 0;

 exit:
-	spin_unlock_irqrestore(&dev->lock, flags);
-	return ret <= 0 ? ret : -EIO;
+	mutex_unlock(&dev->lock);
+	return ret < 0 ? ret : -EIO;
 }

 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;

-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);

 	buf[0] = CP2112_GPIO_SET;
 	buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	if (ret < 0)
 		hid_err(hdev, "error setting GPIO values: %d\n", ret);

-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 }

 static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;

-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);

 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
 				 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
 	ret = buf[1];

 exit:
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);

 	return ret;
 }
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;

-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);

 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
 				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 		goto fail;
 	}

-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);

 	/*
 	 * Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 	return 0;

 fail:
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 	return ret < 0 ? ret : -EIO;
 }

@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	if (!dev->in_out_buffer)
 		return -ENOMEM;

-	spin_lock_init(&dev->lock);
+	mutex_init(&dev->lock);

 	ret = hid_parse(hdev);
 	if (ret) {

+ 3 - 0
drivers/hid/hid-ids.h

@@ -76,6 +76,9 @@
 #define USB_VENDOR_ID_ALPS_JP		0x044E
 #define HID_DEVICE_ID_ALPS_U1_DUAL	0x120B

+#define USB_VENDOR_ID_AMI		0x046b
+#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE	0xff10
+
 #define USB_VENDOR_ID_ANTON		0x1130
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD	0x3101


+ 1 - 1
drivers/hid/hid-lg.c

@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
 		.driver_data = LG_NOGET | LG_FF4 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
-		.driver_data = LG_FF2 },
+		.driver_data = LG_NOGET | LG_FF2 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
 		.driver_data = LG_FF3 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),

+ 1 - 0
drivers/hid/usbhid/hid-quirks.c

@@ -57,6 +57,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },

+ 15 - 13
drivers/hid/wacom_wac.c

@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
 		wacom->id[0] = STYLUS_DEVICE_ID;
 	}

-	pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
-	if (features->pressure_max > 255)
-		pressure = (pressure << 1) | ((data[4] >> 6) & 1);
-	pressure += (features->pressure_max + 1) / 2;
-
-	input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
-	input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
-	input_report_abs(input, ABS_PRESSURE, pressure);
-
-	input_report_key(input, BTN_TOUCH, data[4] & 0x08);
-	input_report_key(input, BTN_STYLUS, data[4] & 0x10);
-	/* Only allow the stylus2 button to be reported for the pen tool. */
-	input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+	if (prox) {
+		pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+		if (features->pressure_max > 255)
+			pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+		pressure += (features->pressure_max + 1) / 2;
+
+		input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+		input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+		input_report_abs(input, ABS_PRESSURE, pressure);
+
+		input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+		input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+		/* Only allow the stylus2 button to be reported for the pen tool. */
+		input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+	}
 
 
 	if (!prox)
 	if (!prox)
 		wacom->id[0] = 0;
 		wacom->id[0] = 0;

+ 4 - 4
drivers/i2c/busses/i2c-cadence.c

@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
 		goto err_clk_dis;
 	}

-	ret = i2c_add_adapter(&id->adap);
-	if (ret < 0)
-		goto err_clk_dis;
-
 	/*
 	 * Cadence I2C controller has a bug wherein it generates
 	 * invalid read transaction after HW timeout in master receiver mode.
@@ -975,6 +971,10 @@
 	 */
 	cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);

+	ret = i2c_add_adapter(&id->adap);
+	if (ret < 0)
+		goto err_clk_dis;
+
 	dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
 		 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);


+ 20 - 0
drivers/i2c/busses/i2c-imx-lpi2c.c

@@ -28,6 +28,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -636,12 +637,31 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
 	return 0;
 }

+#ifdef CONFIG_PM_SLEEP
+static int lpi2c_imx_suspend(struct device *dev)
+{
+	pinctrl_pm_select_sleep_state(dev);
+
+	return 0;
+}
+
+static int lpi2c_imx_resume(struct device *dev)
+{
+	pinctrl_pm_select_default_state(dev);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx_lpi2c_pm, lpi2c_imx_suspend, lpi2c_imx_resume);
+
 static struct platform_driver lpi2c_imx_driver = {
 	.probe = lpi2c_imx_probe,
 	.remove = lpi2c_imx_remove,
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = lpi2c_imx_of_match,
+		.pm = &imx_lpi2c_pm,
 	},
 };


+ 2 - 2
drivers/input/rmi4/rmi_driver.c

@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
 	data->enabled = true;
 	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
 		retval = disable_irq_wake(irq);
-		if (!retval)
+		if (retval)
 			dev_warn(&rmi_dev->dev,
 				 "Failed to disable irq for wake: %d\n",
 				 retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
 	disable_irq(irq);
 	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
 		retval = enable_irq_wake(irq);
-		if (!retval)
+		if (retval)
 			dev_warn(&rmi_dev->dev,
 				 "Failed to enable irq for wake: %d\n",
 				 retval);

+ 1 - 1
drivers/input/touchscreen/wm97xx-core.c

@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
 	}
 	platform_set_drvdata(wm->battery_dev, wm);
 	wm->battery_dev->dev.parent = dev;
-	wm->battery_dev->dev.platform_data = pdata->batt_pdata;
+	wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
 	ret = platform_device_add(wm->battery_dev);
 	if (ret < 0)
 		goto batt_reg_err;
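
Here pdata (the board's platform data) may legitimately be NULL, e.g. when the
device is instantiated without platform data, and dereferencing pdata->batt_pdata
unconditionally oopses in that case. The guard pattern, sketched with the generic
accessor:

    struct wm97xx_pdata *pdata = dev_get_platdata(dev);	/* may be NULL */
    wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;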

+ 5 - 0
drivers/md/md.c

@@ -5291,6 +5291,11 @@ int md_run(struct mddev *mddev)
 	if (start_readonly && mddev->ro == 0)
 		mddev->ro = 2; /* read-only, but switch on first write */
 
+	/*
+	 * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
+	 * up mddev->thread. It is important to initialize critical
+	 * resources for mddev->thread BEFORE calling pers->run().
+	 */
 	err = pers->run(mddev);
 	if (err)
 		pr_warn("md: pers->run() failed ...\n");
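
The hazard the new comment guards against, in outline (illustrative only):

    /*
     * mddev->thread = md_register_thread(...);  (1) thread exists
     * err = pers->run(mddev);                   (2) may wake the thread
     * init_resource_used_by_thread(mddev);      (3) too late if (2) already
     *                                               woke the thread
     *
     * Anything the thread touches must be set up before step (2).
     */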

+ 88 - 18
drivers/md/raid5-cache.c

@@ -162,6 +162,8 @@ struct r5l_log {
 
 
 	/* to submit async io_units, to fulfill ordering of flush */
 	struct work_struct deferred_io_work;
+	/* to disable write back during in degraded mode */
+	struct work_struct disable_writeback_work;
 };
 
 /*
@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work)
 		r5l_do_submit_io(log, io);
 }
 
+static void r5c_disable_writeback_async(struct work_struct *work)
+{
+	struct r5l_log *log = container_of(work, struct r5l_log,
+					   disable_writeback_work);
+	struct mddev *mddev = log->rdev->mddev;
+
+	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+		return;
+	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
+		mdname(mddev));
+	mddev_suspend(mddev);
+	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+	mddev_resume(mddev);
+}
+
 static void r5l_submit_current_io(struct r5l_log *log)
 {
 	struct r5l_io_unit *io = log->current_io;
@@ -1393,8 +1410,6 @@ static void r5l_do_reclaim(struct r5l_log *log)
 	next_checkpoint = r5c_calculate_new_cp(conf);
 	spin_unlock_irq(&log->io_list_lock);
 
-	BUG_ON(reclaimable < 0);
-
 	if (reclaimable == 0 || !write_super)
 		return;
 
@@ -2062,7 +2077,7 @@ static int
 r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
 				       struct r5l_recovery_ctx *ctx)
 {
-	struct stripe_head *sh, *next;
+	struct stripe_head *sh;
 	struct mddev *mddev = log->rdev->mddev;
 	struct page *page;
 	sector_t next_checkpoint = MaxSector;
@@ -2076,7 +2091,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
 
 	WARN_ON(list_empty(&ctx->cached_list));
 
-	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+	list_for_each_entry(sh, &ctx->cached_list, lru) {
 		struct r5l_meta_block *mb;
 		int i;
 		int offset;
@@ -2126,14 +2141,39 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
 		ctx->pos = write_pos;
 		ctx->seq += 1;
 		next_checkpoint = sh->log_start;
-		list_del_init(&sh->lru);
-		raid5_release_stripe(sh);
 	}
 	log->next_checkpoint = next_checkpoint;
 	__free_page(page);
 	return 0;
 }
 
+static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
+						 struct r5l_recovery_ctx *ctx)
+{
+	struct mddev *mddev = log->rdev->mddev;
+	struct r5conf *conf = mddev->private;
+	struct stripe_head *sh, *next;
+
+	if (ctx->data_only_stripes == 0)
+		return;
+
+	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
+
+	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+		r5c_make_stripe_write_out(sh);
+		set_bit(STRIPE_HANDLE, &sh->state);
+		list_del_init(&sh->lru);
+		raid5_release_stripe(sh);
+	}
+
+	md_wakeup_thread(conf->mddev->thread);
+	/* reuse conf->wait_for_quiescent in recovery */
+	wait_event(conf->wait_for_quiescent,
+		   atomic_read(&conf->active_stripes) == 0);
+
+	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+}
+
 static int r5l_recovery_log(struct r5l_log *log)
 {
 	struct mddev *mddev = log->rdev->mddev;
@@ -2160,32 +2200,31 @@ static int r5l_recovery_log(struct r5l_log *log)
 	pos = ctx.pos;
 	ctx.seq += 10000;
 
-	if (ctx.data_only_stripes == 0) {
-		log->next_checkpoint = ctx.pos;
-		r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
-		ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
-	}
 
 	if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
 		pr_debug("md/raid:%s: starting from clean shutdown\n",
 			 mdname(mddev));
-	else {
+	else
 		pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
 			 mdname(mddev), ctx.data_only_stripes,
 			 ctx.data_parity_stripes);
 
-		if (ctx.data_only_stripes > 0)
-			if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
-				pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
-				       mdname(mddev));
-				return -EIO;
-			}
+	if (ctx.data_only_stripes == 0) {
+		log->next_checkpoint = ctx.pos;
+		r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+		ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
+	} else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+		pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+		       mdname(mddev));
+		return -EIO;
 	}
 
 	log->log_start = ctx.pos;
 	log->seq = ctx.seq;
 	log->last_checkpoint = pos;
 	r5l_write_super(log, pos);
+
+	r5c_recovery_flush_data_only_stripes(log, &ctx);
 	return 0;
 }
 
@@ -2247,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
 	    val > R5C_JOURNAL_MODE_WRITE_BACK)
 		return -EINVAL;
 
+	if (raid5_calc_degraded(conf) > 0 &&
+	    val == R5C_JOURNAL_MODE_WRITE_BACK)
+		return -EINVAL;
+
 	mddev_suspend(mddev);
 	conf->log->r5c_journal_mode = val;
 	mddev_resume(mddev);
@@ -2301,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf,
 		set_bit(STRIPE_R5C_CACHING, &sh->state);
 	}
 
+	/*
+	 * When run in degraded mode, array is set to write-through mode.
+	 * This check helps drain pending write safely in the transition to
+	 * write-through mode.
+	 */
+	if (s->failed) {
+		r5c_make_stripe_write_out(sh);
+		return -EAGAIN;
+	}
+
 	for (i = disks; i--; ) {
 		dev = &sh->dev[i];
 		/* if non-overwrite, use writing-out phase */
@@ -2351,6 +2404,8 @@ void r5c_release_extra_page(struct stripe_head *sh)
 			struct page *p = sh->dev[i].orig_page;
 
 			sh->dev[i].orig_page = sh->dev[i].page;
+			clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
+
 			if (!using_disk_info_extra_page)
 				put_page(p);
 		}
@@ -2555,6 +2610,19 @@ ioerr:
 	return ret;
 }
 
+void r5c_update_on_rdev_error(struct mddev *mddev)
+{
+	struct r5conf *conf = mddev->private;
+	struct r5l_log *log = conf->log;
+
+	if (!log)
+		return;
+
+	if (raid5_calc_degraded(conf) > 0 &&
+	    conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
+		schedule_work(&log->disable_writeback_work);
+}
+
 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 {
 	struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -2627,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	spin_lock_init(&log->no_space_stripes_lock);
 
 	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
 
 	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
 	INIT_LIST_HEAD(&log->stripe_in_journal_list);
@@ -2659,6 +2728,7 @@ io_kc:
 
 void r5l_exit_log(struct r5l_log *log)
 {
+	flush_work(&log->disable_writeback_work);
 	md_unregister_thread(&log->reclaim_thread);
 	mempool_destroy(log->meta_pool);
 	bioset_free(log->bs);
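
The disable_writeback_work added here is the standard deferred-work idiom: the
rdev error path (see r5c_update_on_rdev_error() above) may run in a context that
cannot sleep, so it only calls schedule_work(); the work item then performs the
sleeping mddev_suspend()/mddev_resume() transition; and r5l_exit_log() calls
flush_work() first so the work item can never run against a freed log. A condensed
sketch with hypothetical wrapper names:

    void on_rdev_error(struct r5l_log *log)	/* atomic context is fine */
    {
    	schedule_work(&log->disable_writeback_work);
    }

    void log_teardown(struct r5l_log *log)
    {
    	flush_work(&log->disable_writeback_work);	/* before freeing */
    }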

+ 94 - 27
drivers/md/raid5.c

@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
  * of the two sections, and some non-in_sync devices may
  * be insync in the section most affected by failed devices.
  */
-static int calc_degraded(struct r5conf *conf)
+int raid5_calc_degraded(struct r5conf *conf)
 {
 	int degraded, degraded2;
 	int i;
@@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf)
 	if (conf->mddev->reshape_position == MaxSector)
 		return conf->mddev->degraded > conf->max_degraded;
 
-	degraded = calc_degraded(conf);
+	degraded = raid5_calc_degraded(conf);
 	if (degraded > conf->max_degraded)
 		return 1;
 	return 0;
@@ -1015,7 +1015,17 @@ again:
 
 			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
 				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
-			sh->dev[i].vec.bv_page = sh->dev[i].page;
+
+			if (!op_is_write(op) &&
+			    test_bit(R5_InJournal, &sh->dev[i].flags))
+				/*
+				 * issuing read for a page in journal, this
+				 * must be preparing for prexor in rmw; read
+				 * the data into orig_page
+				 */
+				sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
+			else
+				sh->dev[i].vec.bv_page = sh->dev[i].page;
 			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
@@ -2380,6 +2390,13 @@ static void raid5_end_read_request(struct bio * bi)
 		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
 
+		if (test_bit(R5_InJournal, &sh->dev[i].flags))
+			/*
+			 * end read for a page in journal, this
+			 * must be preparing for prexor in rmw
+			 */
+			set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
+
 		if (atomic_read(&rdev->read_errors))
 			atomic_set(&rdev->read_errors, 0);
 	} else {
@@ -2538,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 
 	spin_lock_irqsave(&conf->device_lock, flags);
 	clear_bit(In_sync, &rdev->flags);
-	mddev->degraded = calc_degraded(conf);
+	mddev->degraded = raid5_calc_degraded(conf);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 
@@ -2552,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 		bdevname(rdev->bdev, b),
 		mdname(mddev),
 		conf->raid_disks - mddev->degraded);
+	r5c_update_on_rdev_error(mddev);
 }
 
 /*
@@ -2880,6 +2898,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
 	return r_sector;
 }
 
+/*
+ * There are cases where we want handle_stripe_dirtying() and
+ * schedule_reconstruction() to delay towrite to some dev of a stripe.
+ *
+ * This function checks whether we want to delay the towrite. Specifically,
+ * we delay the towrite when:
+ *
+ *   1. degraded stripe has a non-overwrite to the missing dev, AND this
+ *      stripe has data in journal (for other devices).
+ *
+ *      In this case, when reading data for the non-overwrite dev, it is
+ *      necessary to handle complex rmw of write back cache (prexor with
+ *      orig_page, and xor with page). To keep read path simple, we would
+ *      like to flush data in journal to RAID disks first, so complex rmw
+ *      is handled in the write patch (handle_stripe_dirtying).
+ *
+ */
+static inline bool delay_towrite(struct r5dev *dev,
+				   struct stripe_head_state *s)
+{
+	return !test_bit(R5_OVERWRITE, &dev->flags) &&
+		!test_bit(R5_Insync, &dev->flags) && s->injournal;
+}
+
 static void
 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 			 int rcw, int expand)
@@ -2900,7 +2942,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 
-			if (dev->towrite) {
+			if (dev->towrite && !delay_towrite(dev, s)) {
 				set_bit(R5_LOCKED, &dev->flags);
 				set_bit(R5_Wantdrain, &dev->flags);
 				if (!expand)
@@ -3295,13 +3337,6 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
 	return rv;
 }
 
-/* fetch_block - checks the given member device to see if its data needs
- * to be read or computed to satisfy a request.
- *
- * Returns 1 when no more member devices need to be checked, otherwise returns
- * 0 to tell the loop in handle_stripe_fill to continue
- */
-
 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
 			   int disk_idx, int disks)
 {
@@ -3392,6 +3427,12 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
 	return 0;
 }
 
+/* fetch_block - checks the given member device to see if its data needs
+ * to be read or computed to satisfy a request.
+ *
+ * Returns 1 when no more member devices need to be checked, otherwise returns
+ * 0 to tell the loop in handle_stripe_fill to continue
+ */
 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
 		       int disk_idx, int disks)
 {
@@ -3478,10 +3519,26 @@ static void handle_stripe_fill(struct stripe_head *sh,
 	 * midst of changing due to a write
 	 */
 	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
-	    !sh->reconstruct_state)
+	    !sh->reconstruct_state) {
+
+		/*
+		 * For degraded stripe with data in journal, do not handle
+		 * read requests yet, instead, flush the stripe to raid
+		 * disks first, this avoids handling complex rmw of write
+		 * back cache (prexor with orig_page, and then xor with
+		 * page) in the read path
+		 */
+		if (s->injournal && s->failed) {
+			if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+				r5c_make_stripe_write_out(sh);
+			goto out;
+		}
+
 		for (i = disks; i--; )
 			if (fetch_block(sh, s, i, disks))
 				break;
+	}
+out:
 	set_bit(STRIPE_HANDLE, &sh->state);
 }
 
@@ -3594,6 +3651,21 @@ unhash:
 		break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
 }
 
+/*
+ * For RMW in write back cache, we need extra page in prexor to store the
+ * old data. This page is stored in dev->orig_page.
+ *
+ * This function checks whether we have data for prexor. The exact logic
+ * is:
+ *       R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE)
+ */
+static inline bool uptodate_for_rmw(struct r5dev *dev)
+{
+	return (test_bit(R5_UPTODATE, &dev->flags)) &&
+		(!test_bit(R5_InJournal, &dev->flags) ||
+		 test_bit(R5_OrigPageUPTDODATE, &dev->flags));
+}
+
 static int handle_stripe_dirtying(struct r5conf *conf,
 				  struct stripe_head *sh,
 				  struct stripe_head_state *s,
@@ -3622,12 +3694,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
 	} else for (i = disks; i--; ) {
 		/* would I have to read this buffer for read_modify_write */
 		struct r5dev *dev = &sh->dev[i];
-		if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+		if (((dev->towrite && !delay_towrite(dev, s)) ||
+		     i == sh->pd_idx || i == sh->qd_idx ||
 		     test_bit(R5_InJournal, &dev->flags)) &&
 		    !test_bit(R5_LOCKED, &dev->flags) &&
-		    !((test_bit(R5_UPTODATE, &dev->flags) &&
-		       (!test_bit(R5_InJournal, &dev->flags) ||
-			dev->page != dev->orig_page)) ||
+		    !(uptodate_for_rmw(dev) ||
 		      test_bit(R5_Wantcompute, &dev->flags))) {
 			if (test_bit(R5_Insync, &dev->flags))
 				rmw++;
@@ -3639,7 +3710,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
 		    i != sh->pd_idx && i != sh->qd_idx &&
 		    !test_bit(R5_LOCKED, &dev->flags) &&
 		    !(test_bit(R5_UPTODATE, &dev->flags) ||
-		      test_bit(R5_InJournal, &dev->flags) ||
 		      test_bit(R5_Wantcompute, &dev->flags))) {
 			if (test_bit(R5_Insync, &dev->flags))
 				rcw++;
@@ -3689,13 +3759,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
 
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
-			if ((dev->towrite ||
+			if (((dev->towrite && !delay_towrite(dev, s)) ||
 			     i == sh->pd_idx || i == sh->qd_idx ||
 			     test_bit(R5_InJournal, &dev->flags)) &&
 			    !test_bit(R5_LOCKED, &dev->flags) &&
-			    !((test_bit(R5_UPTODATE, &dev->flags) &&
-			       (!test_bit(R5_InJournal, &dev->flags) ||
-				dev->page != dev->orig_page)) ||
+			    !(uptodate_for_rmw(dev) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
 			    test_bit(R5_Insync, &dev->flags)) {
 				if (test_bit(STRIPE_PREREAD_ACTIVE,
@@ -3722,7 +3790,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
 			    i != sh->pd_idx && i != sh->qd_idx &&
 			    !test_bit(R5_LOCKED, &dev->flags) &&
 			    !(test_bit(R5_UPTODATE, &dev->flags) ||
-			      test_bit(R5_InJournal, &dev->flags) ||
 			      test_bit(R5_Wantcompute, &dev->flags))) {
 				rcw++;
 				if (test_bit(R5_Insync, &dev->flags) &&
@@ -7025,7 +7092,7 @@ static int raid5_run(struct mddev *mddev)
 	/*
 	 * 0 for a fully functional array, 1 or 2 for a degraded array.
 	 */
-	mddev->degraded = calc_degraded(conf);
+	mddev->degraded = raid5_calc_degraded(conf);
 
 	if (has_failed(conf)) {
 		pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
@@ -7272,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev)
 		}
 	}
 	spin_lock_irqsave(&conf->device_lock, flags);
-	mddev->degraded = calc_degraded(conf);
+	mddev->degraded = raid5_calc_degraded(conf);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 	print_raid5_conf(conf);
 	return count;
@@ -7632,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev)
 		 * pre and post number of devices.
 		 */
 		spin_lock_irqsave(&conf->device_lock, flags);
-		mddev->degraded = calc_degraded(conf);
+		mddev->degraded = raid5_calc_degraded(conf);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	mddev->raid_disks = conf->raid_disks;
@@ -7720,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
 		} else {
 			int d;
 			spin_lock_irq(&conf->device_lock);
-			mddev->degraded = calc_degraded(conf);
+			mddev->degraded = raid5_calc_degraded(conf);
 			spin_unlock_irq(&conf->device_lock);
 			for (d = conf->raid_disks ;
 			     d < conf->raid_disks - mddev->delta_disks;
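
uptodate_for_rmw() exists because, with write-back caching, dev->page may hold
data newer than what is on the RAID disk, while prexor needs the on-disk contents
(kept in dev->orig_page and tracked by R5_OrigPageUPTDODATE -- the "UPTDODATE"
spelling is the flag's actual upstream name, preserved verbatim here). The
predicate as a truth table:

    /*
     * R5_UPTODATE  R5_InJournal  R5_OrigPageUPTDODATE  usable for prexor?
     *      0            -                 -            no
     *      1            0                 -            yes (page == on-disk data)
     *      1            1                 0            no  (page holds newer,
     *                                                       journaled data)
     *      1            1                 1            yes (orig_page holds the
     *                                                       on-disk copy)
     */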

+ 7 - 0
drivers/md/raid5.h

@@ -322,6 +322,11 @@ enum r5dev_flags {
 			 * data and parity being written are in the journal
 			 * device
 			 */
+	R5_OrigPageUPTDODATE,	/* with write back cache, we read old data into
+				 * dev->orig_page for prexor. When this flag is
+				 * set, orig_page contains latest data in the
+				 * raid disk.
+				 */
 };
 
 /*
@@ -753,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
 extern struct stripe_head *
 raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 			int previous, int noblock, int noquiesce);
+extern int raid5_calc_degraded(struct r5conf *conf);
 extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
 extern void r5l_exit_log(struct r5l_log *log);
 extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
@@ -781,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
 extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
 extern void r5c_check_cached_full_stripe(struct r5conf *conf);
 extern struct md_sysfs_entry r5c_journal_mode;
+extern void r5c_update_on_rdev_error(struct mddev *mddev);
 #endif

+ 43 - 2
drivers/net/ethernet/adaptec/starfire.c

@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned int entry;
+	unsigned int prev_tx;
 	u32 status;
-	int i;
+	int i, j;
 
 	/*
 	 * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
 	entry = np->cur_tx % TX_RING_SIZE;
 	for (i = 0; i < skb_num_frags(skb); i++) {
 		int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 					       skb_frag_size(this_frag),
 					       PCI_DMA_TODEVICE);
 		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
 		np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
						&skb_shinfo(skb)->frags[j-1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
 				break;	/* Better luck next round. */
 			np->rx_info[entry].mapping =
 				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						np->rx_info[entry].mapping)) {
+				dev_kfree_skb(skb);
+				np->rx_info[entry].skb = NULL;
+				break;
+			}
 			np->rx_ring[entry].rxaddr =
 				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
 		}
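
All three hunks add the same rule: every pci_map_single() must be followed by a
pci_dma_mapping_error() check, and a mid-frame failure must unwind the fragments
already mapped before giving the skb back. The generic shape of the pattern (a
sketch, not the driver's exact code):

    static int map_frags(struct pci_dev *pdev, void **buf, size_t *len,
    			 dma_addr_t *map, int nfrags)
    {
    	int i;

    	for (i = 0; i < nfrags; i++) {
    		map[i] = pci_map_single(pdev, buf[i], len[i],
    					PCI_DMA_TODEVICE);
    		if (pci_dma_mapping_error(pdev, map[i]))
    			goto unwind;
    	}
    	return 0;

    unwind:
    	while (--i >= 0)
    		pci_unmap_single(pdev, map[i], len[i], PCI_DMA_TODEVICE);
    	return -ENOMEM;
    }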

+ 130 - 58
drivers/net/ethernet/cadence/macb.c

@@ -43,13 +43,13 @@
 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE	64
 #define MAX_RX_RING_SIZE	8192
-#define RX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->rx_ring_size)
 
 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE	64
 #define MAX_TX_RING_SIZE	4096
-#define TX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
  */
 #define MACB_HALT_TIMEOUT	1230
 
+/* DMA buffer descriptor might be different size
+ * depends on hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+	return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* Dma buffer descriptor is 4 words length (instead of 2 words)
+	 * for 64b GEM.
+	 */
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		idx <<= 1;
+#endif
+	return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+	return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
 /* Ring buffer accessors */
 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 					  unsigned int index)
 {
-	return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+	index = macb_tx_ring_wrap(queue->bp, index);
+	index = macb_adj_dma_desc_idx(queue->bp, index);
+	return &queue->tx_ring[index];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 	dma_addr_t offset;
 
 	offset = macb_tx_ring_wrap(queue->bp, index) *
-		 sizeof(struct macb_dma_desc);
+			macb_dma_desc_get_size(queue->bp);
 
 	return queue->tx_ring_dma + offset;
 }
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-	return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+	index = macb_rx_ring_wrap(bp, index);
+	index = macb_adj_dma_desc_idx(bp, index);
+	return &bp->rx_ring[index];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
 	}
 }
 
-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
 {
-	desc->addr = (u32)addr;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	desc->addrh = (u32)(addr >> 32);
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		desc_64->addrh = upper_32_bits(addr);
+	}
 #endif
+	desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+	dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		addr = ((u64)(desc_64->addrh) << 32);
+	}
+#endif
+	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+	return addr;
 }
 
 static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
 
 	/* Set end of TX queue */
 	desc = macb_tx_desc(queue, 0);
-	macb_set_addr(desc, 0);
+	macb_set_addr(bp, desc, 0);
 	desc->ctrl = MACB_BIT(TX_USED);
 
 	/* Make descriptor updates visible to hardware */
 	wmb();
 
 	/* Reinitialize the TX desc queue */
-	queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 	/* Make TX ring reflect state of hardware */
 	queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
 	unsigned int		entry;
 	struct sk_buff		*skb;
 	dma_addr_t		paddr;
+	struct macb_dma_desc *desc;
 
 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
 			  bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
 		rmb();
 
 		bp->rx_prepared_head++;
+		desc = macb_rx_desc(bp, entry);
 
 		if (!bp->rx_skbuff[entry]) {
 			/* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
 
 			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
-			macb_set_addr(&(bp->rx_ring[entry]), paddr);
-			bp->rx_ring[entry].ctrl = 0;
+			macb_set_addr(bp, desc, paddr);
+			desc->ctrl = 0;
 
 			/* properly align Ethernet header */
 			skb_reserve(skb, NET_IP_ALIGN);
 		} else {
-			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
-			bp->rx_ring[entry].ctrl = 0;
+			desc->addr &= ~MACB_BIT(RX_USED);
+			desc->ctrl = 0;
 		}
 	}
 
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
 		bool rxused;
 
 		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-		desc = &bp->rx_ring[entry];
+		desc = macb_rx_desc(bp, entry);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		addr = macb_get_addr(bp, desc);
 		ctrl = desc->ctrl;
 
 		if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 static inline void macb_init_rx_ring(struct macb *bp)
 {
 	dma_addr_t addr;
+	struct macb_dma_desc *desc = NULL;
 	int i;
 
 	addr = bp->rx_buffers_dma;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		bp->rx_ring[i].addr = addr;
-		bp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(bp, i);
+		macb_set_addr(bp, desc, addr);
+		desc->ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
-	bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 	bp->rx_tail = 0;
 }
 
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
 
 	for (tail = bp->rx_tail; budget > 0; tail++) {
 		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
-		u32 addr, ctrl;
+		u32 ctrl;
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		addr = desc->addr;
 		ctrl = desc->ctrl;
 
-		if (!(addr & MACB_BIT(RX_USED)))
+		if (!(desc->addr & MACB_BIT(RX_USED)))
 			break;
 
 		if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 	i = tx_head;
 	entry = macb_tx_ring_wrap(bp, i);
 	ctrl = MACB_BIT(TX_USED);
-	desc = &queue->tx_ring[entry];
+	desc = macb_tx_desc(queue, entry);
 	desc->ctrl = ctrl;
 
 	if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 		i--;
 		entry = macb_tx_ring_wrap(bp, i);
 		tx_skb = &queue->tx_skb[entry];
-		desc = &queue->tx_ring[entry];
+		desc = macb_tx_desc(queue, entry);
 
 		ctrl = (u32)tx_skb->size;
 		if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
 
 		/* Set TX buffer descriptor */
-		macb_set_addr(desc, tx_skb->mapping);
+		macb_set_addr(bp, desc, tx_skb->mapping);
 		/* desc->addr must be visible to hardware before clearing
 		 * 'TX_USED' bit in desc->ctrl.
 		 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
 		if (!skb)
 			continue;
 
-		desc = &bp->rx_ring[i];
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		desc = macb_rx_desc(bp, i);
+		addr = macb_get_addr(bp, desc);
+
 		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
 static void gem_init_rings(struct macb *bp)
 {
 	struct macb_queue *queue;
+	struct macb_dma_desc *desc = NULL;
 	unsigned int q;
 	int i;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		for (i = 0; i < bp->tx_ring_size; i++) {
-			queue->tx_ring[i].addr = 0;
-			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+			desc = macb_tx_desc(queue, i);
+			macb_set_addr(bp, desc, 0);
+			desc->ctrl = MACB_BIT(TX_USED);
 		}
-		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+		desc->ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
 	}
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
 static void macb_init_rings(struct macb *bp)
 {
 	int i;
+	struct macb_dma_desc *desc = NULL;
 
 	macb_init_rx_ring(bp);
 
 	for (i = 0; i < bp->tx_ring_size; i++) {
-		bp->queues[0].tx_ring[i].addr = 0;
-		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		desc = macb_tx_desc(&bp->queues[0], i);
+		macb_set_addr(bp, desc, 0);
+		desc->ctrl = MACB_BIT(TX_USED);
 	}
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
-	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+	desc->ctrl |= MACB_BIT(TX_WRAP);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
 			dmacfg &= ~GEM_BIT(TXCOEN);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		dmacfg |= GEM_BIT(ADDR64);
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			dmacfg |= GEM_BIT(ADDR64);
 #endif
 		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
 			   dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
 	macb_configure_dma(bp);
 
 	/* Initialize TX and RX buffers */
-	macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
 #endif
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 
 		/* Enable interrupts */
@@ -2655,7 +2716,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR  = GEM_IMR(hw_q - 1);
 			queue->TBQP = GEM_TBQP(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = GEM_TBQPH(hw_q -1);
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = GEM_TBQPH(hw_q - 1);
 #endif
 		} else {
 			/* queue0 uses legacy registers */
@@ -2665,7 +2727,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR  = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = MACB_TBQPH;
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = MACB_TBQPH;
 #endif
 		}
 
@@ -2758,13 +2821,14 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	dma_addr_t addr;
 	u32 ctl;
 	int i;
 
 	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 					 (AT91ETHER_MAX_RX_DESCR *
-					  sizeof(struct macb_dma_desc)),
+					  macb_dma_desc_get_size(lp)),
 					 &lp->rx_ring_dma, GFP_KERNEL);
 	if (!lp->rx_ring)
 		return -ENOMEM;
@@ -2776,7 +2840,7 @@ static int at91ether_start(struct net_device *dev)
 	if (!lp->rx_buffers) {
 		dma_free_coherent(&lp->pdev->dev,
 				  AT91ETHER_MAX_RX_DESCR *
-				  sizeof(struct macb_dma_desc),
+				  macb_dma_desc_get_size(lp),
 				  lp->rx_ring, lp->rx_ring_dma);
 		lp->rx_ring = NULL;
 		return -ENOMEM;
@@ -2784,13 +2848,14 @@ static int at91ether_start(struct net_device *dev)
 
 	addr = lp->rx_buffers_dma;
 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-		lp->rx_ring[i].addr = addr;
-		lp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(lp, i);
+		macb_set_addr(lp, desc, addr);
+		desc->ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
 	}
 
 	/* Set the Wrap bit on the last descriptor */
-	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 
 	/* Reset buffer index */
 	lp->rx_tail = 0;
@@ -2862,7 +2927,7 @@ static int at91ether_close(struct net_device *dev)
 
 	dma_free_coherent(&lp->pdev->dev,
 			  AT91ETHER_MAX_RX_DESCR *
-			  sizeof(struct macb_dma_desc),
+			  macb_dma_desc_get_size(lp),
 			  lp->rx_ring, lp->rx_ring_dma);
 	lp->rx_ring = NULL;
 
@@ -2913,13 +2978,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	unsigned char *p_recv;
 	struct sk_buff *skb;
 	unsigned int pktlen;
 
-	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+	desc = macb_rx_desc(lp, lp->rx_tail);
+	while (desc->addr & MACB_BIT(RX_USED)) {
 		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
-		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
 		skb = netdev_alloc_skb(dev, pktlen + 2);
 		if (skb) {
 			skb_reserve(skb, 2);
@@ -2933,17 +3000,19 @@ static void at91ether_rx(struct net_device *dev)
 			lp->stats.rx_dropped++;
 		}
 
-		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
 			lp->stats.multicast++;
 
 		/* reset ownership bit */
-		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+		desc->addr &= ~MACB_BIT(RX_USED);
 
 		/* wrap after last buffer */
 		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
 			lp->rx_tail = 0;
 		else
 			lp->rx_tail++;
+
+		desc = macb_rx_desc(lp, lp->rx_tail);
 	}
 }
 
@@ -3239,8 +3308,11 @@ static int macb_probe(struct platform_device *pdev)
 	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
 		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+		bp->hw_dma_cap = HW_DMA_CAP_64B;
+	} else
+		bp->hw_dma_cap = HW_DMA_CAP_32B;
 #endif
 
 	spin_lock_init(&bp->lock);
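
The descriptor arithmetic above follows from the layout change in macb.h (next
file): on a 64-bit-capable GEM each descriptor is the 2-word struct macb_dma_desc
immediately followed by a 2-word struct macb_dma_desc_64. Worked example:

    /*
     * sizeof(struct macb_dma_desc)    == 8   (addr + ctrl)
     * sizeof(struct macb_dma_desc_64) == 8   (addrh + resvd)
     * macb_dma_desc_get_size()        == 16  with HW_DMA_CAP_64B
     *
     * Ring entry 5 starts at byte offset 5 * 16 = 80. Since bp->rx_ring
     * is typed struct macb_dma_desc[] (8-byte elements), the code reaches
     * that offset by doubling the index: &rx_ring[5 << 1] == base + 80,
     * which is exactly what macb_adj_dma_desc_idx() does.
     */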

+ 16 - 3
drivers/net/ethernet/cadence/macb.h

@@ -426,6 +426,8 @@
 /* Bitfields in DCFG6. */
 #define GEM_PBUF_LSO_OFFSET			27
 #define GEM_PBUF_LSO_SIZE			1
+#define GEM_DAW64_OFFSET			23
+#define GEM_DAW64_SIZE				1
 
 /* Bitfields in TISUBN */
 #define GEM_SUBNSINCR_OFFSET			0
@@ -540,11 +542,19 @@
 struct macb_dma_desc {
 	u32	addr;
 	u32	ctrl;
+};
+
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	u32     addrh;
-	u32     resvd;
-#endif
+enum macb_hw_dma_cap {
+	HW_DMA_CAP_32B,
+	HW_DMA_CAP_64B,
+};
+
+struct macb_dma_desc_64 {
+	u32 addrh;
+	u32 resvd;
 };
+#endif
 
 /* DMA descriptor bitfields */
 #define MACB_RX_USED_OFFSET			0
@@ -943,6 +953,9 @@ struct macb {
 	u32			wol;
 
 	struct macb_ptp_info	*ptp_info;	/* macb-ptp interface */
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	enum macb_hw_dma_cap hw_dma_cap;
+#endif
 };
 
 static inline bool macb_is_gem(struct macb *bp)

+ 1 - 2
drivers/net/ethernet/cavium/thunder/thunder_xcv.c

@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
 	int speed = 2;
 
 	if (!xcv) {
-		dev_err(&xcv->pdev->dev,
-			"XCV init not done, probe may have failed\n");
+		pr_err("XCV init not done, probe may have failed\n");
 		return;
 	}
 
 

+ 28 - 5
drivers/net/ethernet/emulex/benet/be_main.c

@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 		status = -EPERM;
 		goto err;
 	}
-done:
+
+	/* Remember currently programmed MAC */
 	ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
 	return 0;
@@ -3617,8 +3619,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
 {
 	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
 	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT))
+	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
 		be_dev_mac_del(adapter, adapter->pmac_id[0]);
+		eth_zero_addr(adapter->dev_mac);
+	}
 
 	be_clear_uc_list(adapter);
 	be_clear_mc_list(adapter);
@@ -3772,12 +3776,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
 	if (status)
 		return status;
 
-	/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+	/* Normally this condition usually true as the ->dev_mac is zeroed.
+	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
+	 * subsequent be_dev_mac_add() can fail (after fresh boot)
+	 */
+	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+		int old_pmac_id = -1;
+
+		/* Remember old programmed MAC if any - can happen on BE3 VF */
+		if (!is_zero_ether_addr(adapter->dev_mac))
+			old_pmac_id = adapter->pmac_id[0];
+
 		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
 		if (status)
 			return status;
+
+		/* Delete the old programmed MAC as we successfully programmed
		 * a new MAC
+		 */
+		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+			be_dev_mac_del(adapter, old_pmac_id);
+
 		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
 	}
 
@@ -4551,6 +4570,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 
 		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+		/* Initial MAC for BE3 VFs is already programmed by PF */
+		if (BEx_chip(adapter) && be_virtfn(adapter))
+			memcpy(adapter->dev_mac, mac, ETH_ALEN);
 	}
 
 	return 0;
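
be_enable_if_filters() uses add-then-delete ordering so the VF is never left
without a programmed MAC filter: the new MAC is added first, and only after that
succeeds is the stale pmac entry (pre-programmed by the PF on BE3 VFs) removed.
Condensed:

    status = be_dev_mac_add(adapter, new_mac);
    if (status)
    	return status;			/* old filter still in place */
    if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
    	be_dev_mac_del(adapter, old_pmac_id);	/* now safe to drop */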

+ 2 - 2
drivers/net/ethernet/freescale/gianfar.c

@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 		if (!rxb->page)
 			continue;
 
-		dma_unmap_single(rx_queue->dev, rxb->dma,
-				 PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(rxb->page);
 
 		rxb->page = NULL;
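
The DMA API requires the unmap call to match the map call that created the
mapping; this buffer was mapped with dma_map_page(), so unmapping it with
dma_unmap_single() is a mismatch that CONFIG_DMA_API_DEBUG flags. The pairing
rule:

    dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
    /* ... hardware fills the page ... */
    dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);	/* not _single */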

+ 1 - 1
drivers/net/ethernet/mellanox/mlx4/catas.c

@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
 	return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
 	return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
 		(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;

+ 12 - 0
drivers/net/ethernet/mellanox/mlx4/intf.c

@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
 		return;
 
 	mlx4_stop_catas_poll(dev);
+	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+	    mlx4_is_slave(dev)) {
+		/* In mlx4_remove_one on a VF */
+		u32 slave_read =
+			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+		if (mlx4_comm_internal_err(slave_read)) {
+			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+				 __func__);
+			mlx4_enter_error_state(dev->persist);
+		}
+	}
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)

+ 1 - 0
drivers/net/ethernet/mellanox/mlx4/mlx4.h

@@ -1225,6 +1225,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
 		    enum mlx4_port_type *type);

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	if (cmd->cmdif_rev > CMD_IF_REV) {
 		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
 			CMD_IF_REV, cmd->cmdif_rev);
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		goto err_free_page;
 	}
 

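This is the first of many -ENOTSUPP to -EOPNOTSUPP conversions in this merge. ENOTSUPP (524) is a kernel-internal value with no userspace definition, so tools report "Unknown error 524"; EOPNOTSUPP maps to the standard "Operation not supported". A quick way to see the difference from userspace:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* EOPNOTSUPP is a standard errno with a proper message... */
        printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
        /* ...while 524 (kernel-internal ENOTSUPP) has none on glibc. */
        printf("524: %s\n", strerror(524));
        return 0;
    }
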
+ 4 - 3
drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -799,7 +799,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -865,12 +866,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);

+ 8 - 3
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c

@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
 	for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	int err;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_dbcnl_validate_ets(netdev, ets);
 	if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct ieee_ets ets;
 	struct ieee_pfc pfc;
-	int err = -ENOTSUPP;
+	int err = -EOPNOTSUPP;
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 
+	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+		netdev_err(netdev, "%s, ets is not supported\n", __func__);
+		return;
+	}
+
 	if (priority >= CEE_DCBX_MAX_PRIO) {
 		netdev_err(netdev,
 			   "%s, priority is out of range\n", __func__);

+ 26 - 15
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c

@@ -612,7 +612,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -637,7 +637,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&priv->state_lock);
 
@@ -997,15 +997,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
 	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-	int i;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+	int tt;
 
 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
 
-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+	}
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -1013,6 +1016,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	bool hash_changed = false;
 	void *in;
 
 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1034,14 +1038,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
 	}
 
-	if (key)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    hfunc != priv->params.rss_hfunc) {
+		priv->params.rss_hfunc = hfunc;
+		hash_changed = true;
+	}
+
+	if (key) {
 		memcpy(priv->params.toeplitz_hash_key, key,
 		       sizeof(priv->params.toeplitz_hash_key));
+		hash_changed = hash_changed ||
+			       priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+	}
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-		priv->params.rss_hfunc = hfunc;
-
-	mlx5e_modify_tirs_hash(priv, in, inlen);
+	if (hash_changed)
+		mlx5e_modify_tirs_hash(priv, in, inlen);
 
 	mutex_unlock(&priv->state_lock);
 
@@ -1313,7 +1324,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	u32 mlx5_wol_mode;
 
 	if (!wol_supported)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (wol->wolopts & ~wol_supported)
 		return -EINVAL;
@@ -1443,7 +1454,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 
 	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
 	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (!rx_mode_changed)
 		return 0;
@@ -1467,7 +1478,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 	struct mlx5_core_dev *mdev = priv->mdev;
 
 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
 		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");

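The mlx5e_set_rxfh() rework above only reprograms the TIRs when the effective RSS configuration actually changed: a new hash function always counts, while a new key only matters while Toeplitz is in use. A small sketch of that change-detection logic (types and names are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    enum hfunc { HASH_NO_CHANGE, HASH_XOR, HASH_TOP /* Toeplitz */ };

    struct rss_state { enum hfunc hfunc; char key[40]; };

    /* Returns true when the device's hash contexts must be rewritten. */
    static bool rss_update(struct rss_state *s, enum hfunc hfunc, const char *key)
    {
        bool hash_changed = false;

        if (hfunc != HASH_NO_CHANGE && hfunc != s->hfunc) {
            s->hfunc = hfunc;
            hash_changed = true;
        }
        if (key) {
            strncpy(s->key, key, sizeof(s->key) - 1);
            /* a new key is only visible while Toeplitz hashing is active */
            hash_changed = hash_changed || s->hfunc == HASH_TOP;
        }
        return hash_changed;
    }

    int main(void)
    {
        struct rss_state s = { .hfunc = HASH_XOR };

        printf("%d\n", rss_update(&s, HASH_NO_CHANGE, "key")); /* 0: no effect   */
        printf("%d\n", rss_update(&s, HASH_TOP, "key"));       /* 1: hfunc change */
        return 0;
    }
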
+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c

@@ -1136,7 +1136,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 					       MLX5_FLOW_NAMESPACE_KERNEL);
 
 	if (!priv->fs.ns)
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_arfs_create_tables(priv);
 	if (err) {

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c

@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 	ns = mlx5_get_flow_namespace(priv->mdev,
 				     MLX5_FLOW_NAMESPACE_ETHTOOL);
 	if (!ns)
-		return ERR_PTR(-ENOTSUPP);
+		return ERR_PTR(-EOPNOTSUPP);
 
 	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
 						       flow_table_properties_nic_receive.log_max_ft_size)),

+ 101 - 101
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -2071,8 +2071,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
 	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt)
 {
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
 	MLX5_SET(tirc, tirc, rx_hash_fn,
 		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
 	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2084,6 +2099,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
 		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
 	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	default:
+		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+	}
 }
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2452,110 +2549,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
 				      enum mlx5e_traffic_types tt)
 {
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
 	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
 	mlx5e_build_tir_ctx_lro(tirc, priv);
 
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
-
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	default:
-		WARN_ONCE(true,
-			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-	}
+	mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3370,7 +3370,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
 	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
 	    !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3382,7 +3382,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 			       < 3) {
 		mlx5_core_warn(mdev,
 			       "Not creating net device, some required device capabilities are missing\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");

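The en_main.c refactor above moves the per-traffic-type field selection into mlx5e_build_indir_tir_ctx_hash(), so the ethtool path can rebuild each TIR hash context from scratch; note the memset() in mlx5e_modify_tirs_hash() before each rebuild, which keeps one traffic type's fields from leaking into the next. A toy sketch of that rebuild loop (stand-in types, not the mlx5 context layout):

    #include <stdio.h>
    #include <string.h>

    #define NUM_INDIR_TIRS 4

    struct tir_ctx { int hash_fn; int selected_fields; };

    /* Stand-in for the hash builder: common part plus a per-type part. */
    static void build_hash_ctx(struct tir_ctx *c, int tt)
    {
        c->hash_fn = 1;               /* hash function/key: same for all types */
        c->selected_fields = 1 << tt; /* field selection differs per type      */
    }

    int main(void)
    {
        struct tir_ctx ctx;

        for (int tt = 0; tt < NUM_INDIR_TIRS; tt++) {
            memset(&ctx, 0, sizeof(ctx)); /* start clean for every traffic type */
            build_hash_ctx(&ctx, tt);
            printf("tir %d: fields=%#x\n", tt, ctx.selected_fields);
        }
        return 0;
    }
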
+ 6 - 8
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -686,6 +686,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   struct neighbour **out_n,
 				   int *out_ttl)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
 
@@ -699,12 +700,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
-
-	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-		ip_rt_put(rt);
-		return -EOPNOTSUPP;
-	}
+	/* if the egress device isn't on the same HW e-switch, we use the uplink */
+	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	else
+		*out_dev = rt->dst.dev;
 
 	*out_ttl = ip4_dst_hoplimit(&rt->dst);
 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -713,8 +713,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 		return -ENOMEM;
 
 	*out_n = n;
-	*out_dev = rt->dst.dev;
-
 	return 0;
 }
 

+ 5 - 5
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 
 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
 		  vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -1634,7 +1634,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
 		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 
 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))

+ 22 - 14
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
 	return 0;
 
 out_notsupp:
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -423,6 +423,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -EOPNOTSUPP;
 		goto ns_err;
 	}
 
@@ -538,7 +539,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
 	if (!ns) {
 		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -658,7 +659,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
 	}
 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
 		if (mlx5_eswitch_inline_mode_get(esw,
@@ -677,9 +678,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	int vport;
 	int err;
 
+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	err = esw_create_offloads_fdb_table(esw, nvports);
 	if (err)
-		return err;
+		goto create_fdb_err;
 
 	err = esw_create_offloads_table(esw);
 	if (err)
@@ -699,11 +705,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 			goto err_reps;
 	}
 
-	/* disable PF RoCE so missed packets don't go through RoCE steering */
-	mlx5_dev_list_lock();
-	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	return 0;
 
 err_reps:
@@ -720,6 +721,13 @@ create_fg_err:
 
 create_ft_err:
 	esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
 
@@ -727,11 +735,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-	/* enable back PF RoCE */
-	mlx5_dev_list_lock();
-	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {
@@ -741,6 +744,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
 	}
 
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
 

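The esw_offloads_init()/_stop() reordering above disables PF RoCE before any offloads table exists and restores it on every exit path, including the new create_fdb_err unwind label. A compact sketch of this goto-based unwind pattern (the step names are illustrative, not the mlx5 call chain):

    #include <stdio.h>

    static void disable_roce(void) { puts("RoCE off"); }
    static void enable_roce(void)  { puts("RoCE back on"); }

    static int create(const char *what, int fail)
    {
        printf("create %s\n", what);
        return fail ? -1 : 0;
    }

    static void destroy(const char *what) { printf("destroy %s\n", what); }

    static int offloads_init(void)
    {
        int err;

        disable_roce();                    /* before any steering table exists */
        err = create("fdb table", 0);
        if (err)
            goto create_fdb_err;
        err = create("offloads table", 1); /* simulate a mid-sequence failure */
        if (err)
            goto create_ft_err;
        return 0;                          /* success: RoCE stays off */

    create_ft_err:
        destroy("fdb table");              /* unwind in reverse order */
    create_fdb_err:
        enable_roce();                     /* every failure path restores RoCE */
        return err;
    }

    int main(void)
    {
        return offloads_init() ? 1 : 0;
    }
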
+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c

@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
 						flow_table_properties_nic_receive.
 						flow_modify_en);
 	if (!atomic_mod_cap)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	opmod = 1;
 
 	return	mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c

@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
 	struct mlx5_flow_table *ft;
 
 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-	if (!ns)
+	if (WARN_ON(!ns))
 		return -EINVAL;
 	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
 	if (IS_ERR(ft)) {

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -831,7 +831,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 		return 0;
 	}
 
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 

+ 2 - 2
drivers/net/ethernet/mellanox/mlx5/core/port.c

@@ -644,7 +644,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
 				    MLX5_REG_QETCR, 0, 1);
@@ -656,7 +656,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
 	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	memset(in, 0, sizeof(in));
 	return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/vport.c

@@ -549,7 +549,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
 		return -EACCES;
 	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	in = mlx5_vzalloc(inlen);
 	if (!in)

+ 4 - 0
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c

@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
 {
 	void __iomem *ioaddr = hw->pcsr;
 	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+	u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
 	int ret = 0;
 
+	/* Discard masked bits */
+	intr_status &= ~intr_mask;
+
 	/* Not used events (e.g. MMC interrupts) are not handled. */
 	if ((intr_status & GMAC_INT_STATUS_MMCTIS))
 		x->mmc_tx_irq_n++;

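The dwmac1000 change above filters the raw interrupt status against the interrupt mask register, so events the driver masked off are never serviced. The bit manipulation in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t intr_status = 0x00000003; /* two events latched by hardware */
        uint32_t intr_mask   = 0x00000002; /* the second event is masked off */

        intr_status &= ~intr_mask;         /* discard masked bits before use */

        printf("events to handle: %#x\n", intr_status); /* prints 0x1 */
        return 0;
    }
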
+ 1 - 1
drivers/net/phy/micrel.c

@@ -1012,7 +1012,7 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id		= PHY_ID_KSZ8795,
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8795",
-	.features	= (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,

+ 1 - 1
drivers/net/wireless/intel/iwlwifi/iwl-8000.c

@@ -91,7 +91,7 @@
 
 #define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
-	IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+	IWL8000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL8265_FW_PRE "iwlwifi-8265-"
 #define IWL8265_MODULE_FIRMWARE(api) \

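The iwl-8000.c fix above is a pure string-pasting bug: IWL8000_FW_PRE already ends in '-', so the extra "-" in the macro produced firmware names like "iwlwifi-8000C--27.ucode" that the loader could never find. The preprocessor mechanics, reproduced standalone:

    #include <stdio.h>

    /* Local copies of the kernel's stringify helpers. */
    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)

    #define FW_PRE "iwlwifi-8000C-"                           /* ends with '-' */
    #define FW_BAD(api)  FW_PRE "-" __stringify(api) ".ucode" /* double dash   */
    #define FW_GOOD(api) FW_PRE __stringify(api) ".ucode"     /* fixed variant */

    int main(void)
    {
        puts(FW_BAD(27));  /* iwlwifi-8000C--27.ucode */
        puts(FW_GOOD(27)); /* iwlwifi-8000C-27.ucode  */
        return 0;
    }
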
+ 4 - 3
drivers/net/wireless/intel/iwlwifi/mvm/sta.c

@@ -1149,9 +1149,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
 		.frame_limit = IWL_FRAME_LIMIT,
 	};
 
-	/* Make sure reserved queue is still marked as such (or allocated) */
-	mvm->queue_info[mvm_sta->reserved_queue].status =
-		IWL_MVM_QUEUE_RESERVED;
+	/* Make sure reserved queue is still marked as such (if allocated) */
+	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+		mvm->queue_info[mvm_sta->reserved_queue].status =
+			IWL_MVM_QUEUE_RESERVED;
 
 	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
 		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];

+ 8 - 4
drivers/net/wireless/intel/iwlwifi/mvm/tt.c

@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
 		return;
 
 	IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
-	thermal_zone_device_unregister(mvm->tz_device.tzone);
-	mvm->tz_device.tzone = NULL;
+	if (mvm->tz_device.tzone) {
+		thermal_zone_device_unregister(mvm->tz_device.tzone);
+		mvm->tz_device.tzone = NULL;
+	}
 }
 
 static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
 		return;
 
 	IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
-	thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
-	mvm->cooling_dev.cdev = NULL;
+	if (mvm->cooling_dev.cdev) {
+		thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+		mvm->cooling_dev.cdev = NULL;
+	}
 }
 #endif /* CONFIG_THERMAL */
 

+ 4 - 4
drivers/parport/parport_gsc.c

@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
 		p->irq = PARPORT_IRQ_NONE;
 	}
 	if (p->irq != PARPORT_IRQ_NONE) {
-		printk(", irq %d", p->irq);
+		pr_cont(", irq %d", p->irq);
 
 		if (p->dma == PARPORT_DMA_AUTO) {
 			p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
                                            is mandatory (see above) */
 		p->dma = PARPORT_DMA_NONE;
 
-	printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+	pr_cont(" [");
+#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
 	{
 		int f = 0;
 		printmode(PCSPP);
@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
 //		printmode(DMA);
 	}
 #undef printmode
-	printk("]\n");
+	pr_cont("]\n");
 
 	if (p->irq != PARPORT_IRQ_NONE) {
 		if (request_irq (p->irq, parport_irq_handler,

+ 1 - 1
drivers/pinctrl/berlin/berlin-bg4ct.c

@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
 	BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
 			BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
 			BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
-			BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+			BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
 	BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
 			BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
 			BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */

+ 20 - 7
drivers/pinctrl/intel/pinctrl-baytrail.c

@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
 				  int reg)
 {
 	struct byt_community *comm = byt_get_community(vg, offset);
-	u32 reg_offset = 0;
+	u32 reg_offset;
 
 	if (!comm)
 		return NULL;
 
 	offset -= comm->pin_base;
-	if (reg == BYT_INT_STAT_REG)
+	switch (reg) {
+	case BYT_INT_STAT_REG:
 		reg_offset = (offset / 32) * 4;
-	else
+		break;
+	case BYT_DEBOUNCE_REG:
+		reg_offset = 0;
+		break;
+	default:
 		reg_offset = comm->pad_map[offset] * 16;
+		break;
+	}
 
 	return comm->reg_base + reg_offset + reg;
 }
@@ -1243,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 			debounce = readl(db_reg);
 			debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
 
+			if (arg)
+				conf |= BYT_DEBOUNCE_EN;
+			else
+				conf &= ~BYT_DEBOUNCE_EN;
+
 			switch (arg) {
-			case 0:
-				conf &= BYT_DEBOUNCE_EN;
-				break;
 			case 375:
 				debounce |= BYT_DEBOUNCE_PULSE_375US;
 				break;
@@ -1269,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 				debounce |= BYT_DEBOUNCE_PULSE_24MS;
 				break;
 			default:
-				ret = -EINVAL;
+				if (arg)
+					ret = -EINVAL;
+				break;
 			}
 
 			if (!ret)
@@ -1612,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
 			continue;
 		}
 
+		raw_spin_lock(&vg->lock);
 		pending = readl(reg);
+		raw_spin_unlock(&vg->lock);
 		for_each_set_bit(pin, &pending, 32) {
 			virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
 			generic_handle_irq(virq);

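The debounce hunk above also removes a subtle flag bug: the old "case 0: conf &= BYT_DEBOUNCE_EN" kept only the enable bit instead of clearing it (the intended operation is conf &= ~BYT_DEBOUNCE_EN). The difference in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define DEBOUNCE_EN 0x1u

    int main(void)
    {
        uint32_t conf = 0xffu;

        uint32_t wrong = conf & DEBOUNCE_EN;  /* 0x01: drops every other bit */
        uint32_t right = conf & ~DEBOUNCE_EN; /* 0xfe: clears just the flag  */

        printf("wrong=%#x right=%#x\n", wrong, right);
        return 0;
    }
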
+ 3 - 0
drivers/pinctrl/intel/pinctrl-merrifield.c

@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
 	unsigned int i;
 	int ret;
 
+	if (!mrfld_buf_available(mp, pin))
+		return -ENOTSUPP;
+
 	for (i = 0; i < nconfigs; i++) {
 		switch (pinconf_to_config_param(configs[i])) {
 		case PIN_CONFIG_BIAS_DISABLE:

+ 1 - 2
drivers/pinctrl/sunxi/pinctrl-sunxi.c

@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
 			val = arg / 10 - 1;
 			break;
 		case PIN_CONFIG_BIAS_DISABLE:
-			val = 0;
-			break;
+			continue;
 		case PIN_CONFIG_BIAS_PULL_UP:
 			if (arg == 0)
 				return -EINVAL;

+ 4 - 1
drivers/rtc/Kconfig

@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
 	  will be called rtc-mpc5121.
 
 config RTC_DRV_JZ4740
-	bool "Ingenic JZ4740 SoC"
+	tristate "Ingenic JZ4740 SoC"
 	depends on MACH_INGENIC || COMPILE_TEST
 	help
 	  If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
 	  controllers.
 
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-jz4740.
+
 config RTC_DRV_LPC24XX
 	tristate "NXP RTC for LPC178x/18xx/408x/43xx"
 	depends on ARCH_LPC18XX || COMPILE_TEST

+ 10 - 2
drivers/rtc/rtc-jz4740.c

@@ -17,6 +17,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
 			     JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
 
 	jz4740_rtc_poweroff(dev_for_power_off);
-	machine_halt();
+	kernel_halt();
 }
 
 static const struct of_device_id jz4740_rtc_of_match[] = {
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
 	{ .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
 	{},
 };
+MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
 
 static int jz4740_rtc_probe(struct platform_device *pdev)
 {
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
 	{ "jz4780-rtc", ID_JZ4780 },
 	{}
 };
+MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
 
 static struct platform_driver jz4740_rtc_driver = {
 	.probe	 = jz4740_rtc_probe,
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
 	.id_table = jz4740_rtc_ids,
 };
 
-builtin_platform_driver(jz4740_rtc_driver);
+module_platform_driver(jz4740_rtc_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC");
+MODULE_ALIAS("platform:jz4740-rtc");

+ 1 - 0
fs/cifs/readdir.c

@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
 			rc = -ENOMEM;
 			goto error_exit;
 		}
+		spin_lock_init(&cifsFile->file_info_lock);
 		file->private_data = cifsFile;
 		cifsFile->tlink = cifs_get_tlink(tlink);
 		tcon = tlink_tcon(tlink);

+ 5 - 0
fs/fscache/cookie.c

@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 		hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
 			if (invalidate)
 				set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+			clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
 		}
 	} else {
@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
 				 TASK_UNINTERRUPTIBLE);
 
+	/* Make sure any pending writes are cancelled. */
+	if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+		fscache_invalidate_writes(cookie);
+
 	/* Reset the cookie state if it wasn't relinquished */
 	if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
 		atomic_inc(&cookie->n_active);

+ 1 - 0
fs/fscache/netfs.c

@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
 	cookie->flags		= 1 << FSCACHE_COOKIE_ENABLED;
 
 	spin_lock_init(&cookie->lock);
+	spin_lock_init(&cookie->stores_lock);
 	INIT_HLIST_HEAD(&cookie->backing_objects);
 
 	/* check the netfs type is not already present */

+ 30 - 2
fs/fscache/object.c

@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
 
 #define __STATE_NAME(n) fscache_osm_##n
 #define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE,	"LCFL", fscache_lookup_failure);
 static WORK_STATE(KILL_OBJECT,		"KILL", fscache_kill_object);
 static WORK_STATE(KILL_DEPENDENTS,	"KDEP", fscache_kill_dependents);
 static WORK_STATE(DROP_OBJECT,		"DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD,		"DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD,		"DEAD", fscache_object_dead);
 
 static WAIT_STATE(WAIT_FOR_INIT,	"?INI",
 		  TRANSIT_TO(INIT_OBJECT,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@ execute_work_state:
 	event = -1;
 	if (new_state == NO_TRANSIT) {
 		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
+			_leave(" [dead]");
+			return;
+		}
 		fscache_enqueue_object(object);
 		event_mask = object->oob_event_mask;
 		goto unmask_events;
@@ -239,7 +244,7 @@ execute_work_state:
 	object->state = state = new_state;
 
 	if (state->work) {
-		if (unlikely(state->work == ((void *)2UL))) {
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
 			_leave(" [dead]");
 			return;
 		}
@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
 	fscache_mark_object_dead(object);
 	object->oob_event_mask = 0;
 
+	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+		/* Reject any new read/write ops and abort any that are pending. */
+		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+		fscache_cancel_all_ops(object);
+	}
+
 	if (list_empty(&object->dependents) &&
 	    object->n_ops == 0 &&
 	    object->n_children == 0)
@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
 	}
 }
 EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead.  We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+						       int event)
+{
+	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+			      &object->flags))
+		return NO_TRANSIT;
+
+	WARN(true, "FS-Cache object redispatched after death");
+	return NO_TRANSIT;
+}

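The object.c changes above turn the OBJECT_DEAD sentinel pointer into a real state handler and use a one-shot flag so a dispatch that was already queued when the object died is tolerated once; anything after that warns. A toy model of that guard (stand-in types, not the fscache state machine):

    #include <stdbool.h>
    #include <stdio.h>

    struct object {
        bool run_after_dead; /* mirrors FSCACHE_OBJECT_RUN_AFTER_DEAD */
    };

    static void object_dead(struct object *obj)
    {
        if (!obj->run_after_dead) {
            obj->run_after_dead = true; /* one queued dispatch may still land */
            return;
        }
        fprintf(stderr, "object redispatched after death\n"); /* akin to WARN() */
    }

    int main(void)
    {
        struct object obj = { .run_after_dead = false };

        object_dead(&obj); /* tolerated silently */
        object_dead(&obj); /* complains */
        return 0;
    }
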
+ 3 - 1
fs/nfs/nfs4proc.c

@@ -2700,7 +2700,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
 		sattr->ia_valid |= ATTR_MTIME;
 
 	/* Except MODE, it seems harmless of setting twice. */
-	if ((attrset[1] & FATTR4_WORD1_MODE))
+	if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
+		attrset[1] & FATTR4_WORD1_MODE)
 		sattr->ia_valid &= ~ATTR_MODE;
 
 	if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
@@ -8490,6 +8491,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
 		goto out;
 	}
 
+	nfs4_sequence_free_slot(&lgp->res.seq_res);
 	err = nfs4_handle_exception(server, nfs4err, exception);
 	if (!status) {
 		if (exception->retry)

+ 1 - 0
fs/nfs/nfs4state.c

@@ -1091,6 +1091,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
 		case -NFS4ERR_BADXDR:
 		case -NFS4ERR_RESOURCE:
 		case -NFS4ERR_NOFILEHANDLE:
+		case -NFS4ERR_MOVED:
 			/* Non-seqid mutating errors */
 			return;
 	};

+ 1 - 1
fs/nfs/pnfs.c

@@ -1200,10 +1200,10 @@ _pnfs_return_layout(struct inode *ino)
 
 	send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
 	spin_unlock(&ino->i_lock);
-	pnfs_free_lseg_list(&tmp_list);
 	if (send)
 		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
 out_put_layout_hdr:
+	pnfs_free_lseg_list(&tmp_list);
 	pnfs_put_layout_hdr(lo);
 out:
 	dprintk("<-- %s status: %d\n", __func__, status);

+ 3 - 4
include/linux/can/core.h

@@ -45,10 +45,9 @@ struct can_proto {
 extern int  can_proto_register(const struct can_proto *cp);
 extern void can_proto_unregister(const struct can_proto *cp);
 
-extern int  can_rx_register(struct net_device *dev, canid_t can_id,
-			    canid_t mask,
-			    void (*func)(struct sk_buff *, void *),
-			    void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+		    void (*func)(struct sk_buff *, void *),
+		    void *data, char *ident, struct sock *sk);
 
 extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
 			      canid_t mask,

+ 1 - 0
include/linux/fscache-cache.h

@@ -360,6 +360,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_AVAILABLE	5	/* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED		6	/* T if object was retired on relinquishment */
 #define FSCACHE_OBJECT_KILLED_BY_CACHE	7	/* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD	8	/* T if object has been dispatched after death */
 
 	struct list_head	cache_link;	/* link in cache->object_list */
 	struct hlist_node	cookie_link;	/* link in cookie->backing_objects */

+ 15 - 14
include/linux/netdevice.h

@@ -866,11 +866,15 @@ struct netdev_xdp {
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- *		netdev_features_t features);
- *	Adjusts the requested feature flags according to device-specific
- *	constraints, and returns the resulting flags. Must not modify
- *	the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *					   struct net_device *dev
+ *					   netdev_features_t features);
+ *	Called by core transmit path to determine if device is capable of
+ *	performing offload operations on a given packet. This is to give
+ *	the device an opportunity to implement any restrictions that cannot
+ *	be otherwise expressed by feature flags. The check is called with
+ *	the set of features that the stack has calculated and it returns
+ *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         void *accel_priv, select_queue_fallback_t fallback);
@@ -1029,6 +1033,12 @@ struct netdev_xdp {
 *	Called to release previously enslaved netdev.
 *
 *      Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *		netdev_features_t features);
+ *	Adjusts the requested feature flags according to device-specific
+ *	constraints, and returns the resulting flags. Must not modify
+ *	the device state.
+ *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features()).
@@ -1101,15 +1111,6 @@ struct netdev_xdp {
  *	Callback to use for xmit over the accelerated station. This
  *	Callback to use for xmit over the accelerated station. This
  *	is used in place of ndo_start_xmit on accelerated net
  *	is used in place of ndo_start_xmit on accelerated net
  *	devices.
  *	devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- *					   struct net_device *dev
- *					   netdev_features_t features);
- *	Called by core transmit path to determine if device is capable of
- *	performing offload operations on a given packet. This is to give
- *	the device an opportunity to implement any restrictions that cannot
- *	be otherwise expressed by feature flags. The check is called with
- *	the set of features that the stack has calculated and it returns
- *	those the driver believes to be appropriate.
  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  *			     int queue_index, u32 maxrate);
  *			     int queue_index, u32 maxrate);
  *	Called when a user wants to set a max-rate limitation of specific
  *	Called when a user wants to set a max-rate limitation of specific
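
Note: a minimal driver-side sketch of the relocated ndo_features_check hook; the MY_HW_MAX_HDR_LEN limit and the function name are invented for illustration:

	static netdev_features_t
	my_features_check(struct sk_buff *skb, struct net_device *dev,
			  netdev_features_t features)
	{
		/* Hypothetical hardware limit on parseable header depth. */
		if (skb_transport_offset(skb) > MY_HW_MAX_HDR_LEN)
			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		return features;
	}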

+ 2 - 1
include/linux/nfs4.h

@@ -282,7 +282,7 @@ enum nfsstat4 {

 static inline bool seqid_mutating_err(u32 err)
 {
-	/* rfc 3530 section 8.1.5: */
+	/* See RFC 7530, section 9.1.7 */
 	switch (err) {
 	case NFS4ERR_STALE_CLIENTID:
 	case NFS4ERR_STALE_STATEID:
@@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err)
 	case NFS4ERR_BADXDR:
 	case NFS4ERR_RESOURCE:
 	case NFS4ERR_NOFILEHANDLE:
+	case NFS4ERR_MOVED:
 		return false;
 	};
 	return true;
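
Note: the practical effect is that an NFS4ERR_MOVED reply no longer advances the owner's sequence id. Sketched caller contract (field names illustrative, not the actual fs/nfs call site):

	if (seqid_mutating_err(err))	/* now false for NFS4ERR_MOVED */
		seqid->counter++;	/* only mutating replies bump the seqid */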

+ 2 - 2
include/linux/percpu-refcount.h

@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
-	int ret;
+	bool ret;

 	rcu_read_lock_sched();

@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
-	int ret = false;
+	bool ret = false;

 	rcu_read_lock_sched();
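
Note: the int-to-bool change is pure type hygiene; callers are unaffected. The usual tryget pattern, sketched with a hypothetical object:

	if (!percpu_ref_tryget_live(&obj->ref))
		return -ENODEV;		/* ref already killed */
	/* ... use obj ... */
	percpu_ref_put(&obj->ref);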
 

+ 1 - 0
include/linux/sunrpc/clnt.h

@@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
 			const struct sockaddr *sap);
+void rpc_cleanup_clids(void);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_CLNT_H */

+ 5 - 0
include/net/ipv6.h

@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
 {
 	u32 hash;

+	/* @flowlabel may include more than a flow label, eg, the traffic class.
+	 * Here we want only the flow label value.
+	 */
+	flowlabel &= IPV6_FLOWLABEL_MASK;
+
 	if (flowlabel ||
 	    net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
 	    (!autolabel &&
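
Note: IPV6_FLOWLABEL_MASK is cpu_to_be32(0x000FFFFF), so the new masking keeps only the low 20 label bits of the big-endian flowinfo word. A sketch of the same extraction done by hand:

	__be32 flowinfo = ip6_flowinfo(ipv6_hdr(skb));	/* tclass + label */
	__be32 label = flowinfo & IPV6_FLOWLABEL_MASK;	/* label bits only */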

+ 8 - 8
include/soc/arc/mcip.h

@@ -55,17 +55,17 @@ struct mcip_cmd {

 struct mcip_bcr {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-		unsigned int pad3:8,
-			     idu:1, llm:1, num_cores:6,
-			     iocoh:1,  gfrc:1, dbg:1, pad2:1,
-			     msg:1, sem:1, ipi:1, pad:1,
+		unsigned int pad4:6, pw_dom:1, pad3:1,
+			     idu:1, pad2:1, num_cores:6,
+			     pad:1,  gfrc:1, dbg:1, pw:1,
+			     msg:1, sem:1, ipi:1, slv:1,
 			     ver:8;
 #else
 		unsigned int ver:8,
-			     pad:1, ipi:1, sem:1, msg:1,
-			     pad2:1, dbg:1, gfrc:1, iocoh:1,
-			     num_cores:6, llm:1, idu:1,
-			     pad3:8;
+			     slv:1, ipi:1, sem:1, msg:1,
+			     pw:1, dbg:1, gfrc:1, pad:1,
+			     num_cores:6, pad2:1, idu:1,
+			     pad3:1, pw_dom:1, pad4:6;
 #endif
 };
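
Note: for readers decoding the register without the bitfield struct, the little-endian layout above corresponds to plain shifts and masks (a sketch; bcr is the raw 32-bit register value):

	unsigned int ver       = (bcr >>  0) & 0xff;	/* bits  7:0  */
	unsigned int ipi       = (bcr >>  9) & 0x1;
	unsigned int gfrc      = (bcr >> 14) & 0x1;
	unsigned int num_cores = (bcr >> 16) & 0x3f;	/* bits 21:16 */
	unsigned int idu       = (bcr >> 23) & 0x1;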
 

+ 3 - 1
include/uapi/linux/ethtool.h

@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices {
 	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT	= 44,
 	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT	= 45,
 	ETHTOOL_LINK_MODE_10000baseER_Full_BIT	= 46,
+	ETHTOOL_LINK_MODE_2500baseT_Full_BIT	= 47,
+	ETHTOOL_LINK_MODE_5000baseT_Full_BIT	= 48,


 	/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices {
 	 */

 	__ETHTOOL_LINK_MODE_LAST
-	  = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+	  = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
 };

 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)	\
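
Note: drivers advertise the two new NBASE-T modes through the standard link-ksettings helpers; a sketch of the driver side (ks is the struct ethtool_link_ksettings being filled in):

	ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseT_Full);
	ethtool_link_ksettings_add_link_mode(ks, supported, 5000baseT_Full);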

+ 5 - 8
kernel/cgroup.c

@@ -5221,6 +5221,11 @@ err_free_css:
 	return ERR_PTR(err);
 }

+/*
+ * The returned cgroup is fully initialized including its control mask, but
+ * it isn't associated with its kernfs_node and doesn't have the control
+ * mask applied.
+ */
 static struct cgroup *cgroup_create(struct cgroup *parent)
 {
 	struct cgroup_root *root = parent->root;
@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)

 	cgroup_propagate_control(cgrp);

-	/* @cgrp doesn't have dir yet so the following will only create csses */
-	ret = cgroup_apply_control_enable(cgrp);
-	if (ret)
-		goto out_destroy;
-
 	return cgrp;

 out_cancel_ref:
@@ -5300,9 +5300,6 @@ out_cancel_ref:
 out_free_cgrp:
 	kfree(cgrp);
 	return ERR_PTR(ret);
-out_destroy:
-	cgroup_destroy_locked(cgrp);
-	return ERR_PTR(ret);
 }

 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,

+ 5 - 3
kernel/trace/trace_hwlat.c

@@ -266,7 +266,7 @@ out:
 static struct cpumask save_cpumask;
 static bool disable_migrate;

-static void move_to_next_cpu(void)
+static void move_to_next_cpu(bool initmask)
 {
 	static struct cpumask *current_mask;
 	int next_cpu;
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
 		return;

 	/* Just pick the first CPU on first iteration */
-	if (!current_mask) {
+	if (initmask) {
 		current_mask = &save_cpumask;
 		get_online_cpus();
 		cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
 static int kthread_fn(void *data)
 {
 	u64 interval;
+	bool initmask = true;

 	while (!kthread_should_stop()) {

-		move_to_next_cpu();
+		move_to_next_cpu(initmask);
+		initmask = false;

 		local_irq_disable();
 		get_sample();
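
Note: the old !current_mask test fired only once per boot, because the static pointer survives kthread restarts; an explicit first-iteration flag fixes that. The pattern in isolation (do_work() is a stand-in):

	bool first = true;

	while (!kthread_should_stop()) {
		do_work(first);		/* seed per-run state on the first pass only */
		first = false;
	}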

+ 10 - 2
net/can/af_can.c

@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * @func: callback function on filter match
  * @data: returned parameter for callback function
  * @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
  *
  * Description:
  *  Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  */
 int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
 		    void (*func)(struct sk_buff *, void *), void *data,
-		    char *ident)
+		    char *ident, struct sock *sk)
 {
 	struct receiver *r;
 	struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
 		r->func    = func;
 		r->data    = data;
 		r->ident   = ident;
+		r->sk      = sk;

 		hlist_add_head_rcu(&r->list, rl);
 		d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
 static void can_rx_delete_receiver(struct rcu_head *rp)
 {
 	struct receiver *r = container_of(rp, struct receiver, rcu);
+	struct sock *sk = r->sk;

 	kmem_cache_free(rcv_cache, r);
+	if (sk)
+		sock_put(sk);
 }

 /**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
 	spin_unlock(&can_rcvlists_lock);

 	/* schedule the receiver item for deletion */
-	if (r)
+	if (r) {
+		if (r->sk)
+			sock_hold(r->sk);
 		call_rcu(&r->rcu, can_rx_delete_receiver);
+	}
 }
 EXPORT_SYMBOL(can_rx_unregister);
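
Note: the generic shape of the fix is to pin any object a deferred-free callback will dereference before scheduling it, and release the pin inside the callback; without the sock_hold() the socket could be freed during the RCU grace period while a still-registered receiver points at it. Sketch:

	sock_hold(r->sk);				/* keep sk across the grace period */
	call_rcu(&r->rcu, can_rx_delete_receiver);	/* matching sock_put() runs there */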
 

+ 2 - 1
net/can/af_can.h

@@ -50,13 +50,14 @@

 struct receiver {
 	struct hlist_node list;
-	struct rcu_head rcu;
 	canid_t can_id;
 	canid_t mask;
 	unsigned long matches;
 	void (*func)(struct sk_buff *, void *);
 	void *data;
 	char *ident;
+	struct sock *sk;
+	struct rcu_head rcu;
 };

 #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)

+ 18 - 9
net/can/bcm.c

@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,

 static void bcm_remove_op(struct bcm_op *op)
 {
-	hrtimer_cancel(&op->timer);
-	hrtimer_cancel(&op->thrtimer);
-
-	if (op->tsklet.func)
-		tasklet_kill(&op->tsklet);
+	if (op->tsklet.func) {
+		while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+		       test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+		       hrtimer_active(&op->timer)) {
+			hrtimer_cancel(&op->timer);
+			tasklet_kill(&op->tsklet);
+		}
+	}

-	if (op->thrtsklet.func)
-		tasklet_kill(&op->thrtsklet);
+	if (op->thrtsklet.func) {
+		while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+		       test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+		       hrtimer_active(&op->thrtimer)) {
+			hrtimer_cancel(&op->thrtimer);
+			tasklet_kill(&op->thrtsklet);
+		}
+	}

 	if ((op->frames) && (op->frames != &op->sframe))
 		kfree(op->frames);
@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 				err = can_rx_register(dev, op->can_id,
 						      REGMASK(op->can_id),
 						      bcm_rx_handler, op,
-						      "bcm");
+						      "bcm", sk);

 				op->rx_reg_dev = dev;
 				dev_put(dev);
@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		} else
 			err = can_rx_register(NULL, op->can_id,
 					      REGMASK(op->can_id),
-					      bcm_rx_handler, op, "bcm", sk);
 		if (err) {
 			/* this bcm rx op is broken -> remove it */
 			list_del(&op->list);
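
Note on the bcm_remove_op loop above: cancelling the hrtimer once is not sufficient, because the timer callback can schedule the tasklet and the tasklet can re-arm the timer; re-checking both until the tasklet is neither scheduled nor running and the timer is inactive is what guarantees the pair is quiescent before the op is freed.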

+ 1 - 1
net/can/gw.c

@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
 {
 	return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
 			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
-			       gwj, "gw");
+			       gwj, "gw", NULL);
 }

 static inline void cgw_unregister_filter(struct cgw_job *gwj)

+ 2 - 2
net/can/raw.c

@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
 	for (i = 0; i < count; i++) {
 		err = can_rx_register(dev, filter[i].can_id,
 				      filter[i].can_mask,
-				      raw_rcv, sk, "raw");
+				      raw_rcv, sk, "raw", sk);
 		if (err) {
 			/* clean up successfully registered filters */
 			while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,

 	if (err_mask)
 		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
-				      raw_rcv, sk, "raw");
+				      raw_rcv, sk, "raw", sk);

 	return err;
 }

+ 4 - 2
net/ipv4/tcp_output.c

@@ -2515,9 +2515,11 @@ u32 __tcp_select_window(struct sock *sk)
 	int full_space = min_t(int, tp->window_clamp, allowed_space);
 	int window;

-	if (mss > full_space)
+	if (unlikely(mss > full_space)) {
 		mss = full_space;
-
+		if (mss <= 0)
+			return 0;
+	}
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
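
Note: full_space is the smaller of tp->window_clamp and allowed_space and can legitimately be zero, for example after SO_RCVBUF is shrunk; the old code then continued with mss clamped to 0, and the window is later rounded to a multiple of mss, which divides by mss. Returning 0 early advertises a closed window instead of hitting that divide-by-zero.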
 

+ 1 - 1
net/ipv6/ip6_output.c

@@ -1344,7 +1344,7 @@ emsgsize:
 	 */
 	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
 	    headersize == sizeof(struct ipv6hdr) &&
-	    length < mtu - headersize &&
+	    length <= mtu - headersize &&
 	    !(flags & MSG_MORE) &&
 	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
 		csummode = CHECKSUM_PARTIAL;
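
Note: this is an off-by-one at the single-packet boundary: with a 1500-byte MTU and the bare 40-byte IPv6 header, a UDP send of exactly 1460 bytes still fits in one unfragmented packet, so it can use CHECKSUM_PARTIAL too; the old strict '<' needlessly excluded that exact-fit case.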

Too many files changed in this diff, so some files are not shown.