
Merge back earlier ACPI thermal material.

Rafael J. Wysocki 11 years ago
parent
commit
f70977fbd6
100 changed files with 2133 additions and 802 deletions
  1. +6 -6  Documentation/DocBook/drm.tmpl
  2. +1 -1  Documentation/DocBook/media/Makefile
  3. +1 -1  Documentation/devicetree/bindings/net/mdio-gpio.txt
  4. +5 -10  Documentation/email-clients.txt
  5. +3 -2  Documentation/filesystems/proc.txt
  6. +14 -0  Documentation/hwmon/sysfs-interface
  7. +8 -0  Documentation/java.txt
  8. +1 -1  Documentation/networking/filter.txt
  9. +1 -1  Documentation/networking/packet_mmap.txt
  10. +3 -3  MAINTAINERS
  11. +1 -1  Makefile
  12. +1 -1  arch/parisc/kernel/syscall_table.S
  13. +1 -1  arch/s390/net/bpf_jit_comp.c
  14. +4 -2  arch/sparc/include/asm/pgtable_64.h
  15. +1 -1  arch/sparc/kernel/sysfs.c
  16. +1 -0  arch/sparc/lib/NG2memcpy.S
  17. +1 -15  arch/sparc/mm/fault_64.c
  18. +13 -1  arch/sparc/mm/tsb.c
  19. +0 -1  arch/x86/kernel/cpu/perf_event_intel.c
  20. +1 -1  arch/x86/net/bpf_jit_comp.c
  21. +24 -4  drivers/acpi/bus.c
  22. +4 -7  drivers/acpi/thermal.c
  23. +2 -0  drivers/dma/dmaengine.c
  24. +3 -5  drivers/dma/mv_xor.c
  25. +1 -1  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
  26. +1 -0  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
  27. +4 -2  drivers/gpu/drm/radeon/radeon.h
  28. +14 -0  drivers/gpu/drm/radeon/radeon_bios.c
  29. +1 -1  drivers/gpu/drm/radeon/radeon_display.c
  30. +29 -26  drivers/gpu/drm/radeon/radeon_kms.c
  31. +24 -16  drivers/gpu/drm/radeon/radeon_object.c
  32. +41 -1  drivers/gpu/drm/radeon/radeon_pm.c
  33. +100 -30  drivers/gpu/drm/radeon/radeon_vce.c
  34. +1 -1  drivers/gpu/drm/radeon/radeon_vm.c
  35. +2 -2  drivers/gpu/drm/radeon/sid.h
  36. +1 -1  drivers/hwmon/Kconfig
  37. +9 -6  drivers/hwmon/ntc_thermistor.c
  38. +64 -3  drivers/infiniband/hw/mlx4/main.c
  39. +3 -0  drivers/infiniband/hw/mlx4/mlx4_ib.h
  40. +8 -0  drivers/infiniband/hw/mlx4/qp.c
  41. +38 -16  drivers/net/bonding/bond_alb.c
  42. +65 -69  drivers/net/bonding/bond_main.c
  43. +1 -0  drivers/net/bonding/bond_options.c
  44. +1 -0  drivers/net/bonding/bonding.h
  45. +0 -7  drivers/net/can/c_can/Kconfig
  46. +0 -36  drivers/net/can/c_can/c_can.c
  47. +9 -5  drivers/net/can/sja1000/peak_pci.c
  48. +12 -0  drivers/net/ethernet/Kconfig
  49. +1 -0  drivers/net/ethernet/Makefile
  50. +1 -0  drivers/net/ethernet/altera/Makefile
  51. +55 -55  drivers/net/ethernet/altera/altera_msgdma.c
  52. +4 -9  drivers/net/ethernet/altera/altera_msgdmahw.h
  53. +90 -91  drivers/net/ethernet/altera/altera_sgdma.c
  54. +14 -12  drivers/net/ethernet/altera/altera_sgdmahw.h
  55. +47 -0  drivers/net/ethernet/altera/altera_tse.h
  56. +71 -37  drivers/net/ethernet/altera/altera_tse_ethtool.c
  57. +76 -57  drivers/net/ethernet/altera/altera_tse_main.c
  58. +10 -10  drivers/net/ethernet/altera/altera_utils.c
  59. +4 -4  drivers/net/ethernet/altera/altera_utils.h
  60. +7 -3  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
  61. +1 -1  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
  62. +1 -1  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
  63. +706 -0  drivers/net/ethernet/ec_bhf.c
  64. +6 -0  drivers/net/ethernet/emulex/benet/be_main.c
  65. +47 -6  drivers/net/ethernet/jme.c
  66. +2 -2  drivers/net/ethernet/mellanox/mlx4/cmd.c
  67. +6 -0  drivers/net/ethernet/mellanox/mlx4/mlx4.h
  68. +35 -0  drivers/net/ethernet/mellanox/mlx4/qp.c
  69. +54 -0  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
  70. +0 -16  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
  71. +53 -4  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
  72. +8 -6  drivers/net/ethernet/sfc/nic.c
  73. +1 -3  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
  74. +1 -1  drivers/net/ethernet/sun/cassini.c
  75. +6 -11  drivers/net/ethernet/ti/cpsw.c
  76. +14 -4  drivers/net/macvlan.c
  77. +4 -0  drivers/net/phy/mdio-gpio.c
  78. +9 -7  drivers/net/phy/phy.c
  79. +2 -2  drivers/net/phy/phy_device.c
  80. +41 -16  drivers/net/usb/cdc_mbim.c
  81. +4 -1  drivers/net/wireless/ath/ath9k/htc_drv_main.c
  82. +1 -1  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
  83. +3 -3  drivers/net/wireless/iwlwifi/mvm/coex.c
  84. +4 -4  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
  85. +7 -2  drivers/net/wireless/iwlwifi/mvm/mac80211.c
  86. +3 -0  drivers/net/wireless/iwlwifi/mvm/mvm.h
  87. +1 -1  drivers/net/wireless/iwlwifi/mvm/rs.c
  88. +13 -42  drivers/net/wireless/iwlwifi/mvm/scan.c
  89. +19 -0  drivers/net/wireless/iwlwifi/mvm/utils.c
  90. +6 -4  drivers/net/wireless/iwlwifi/pcie/trans.c
  91. +1 -1  drivers/net/xen-netback/common.h
  92. +3 -27  drivers/net/xen-netback/interface.c
  93. +82 -20  drivers/net/xen-netback/netback.c
  94. +2 -1  drivers/ptp/Kconfig
  95. +1 -2  drivers/scsi/scsi_transport_sas.c
  96. +19 -0  fs/afs/cmservice.c
  97. +1 -1  fs/afs/internal.h
  98. +43 -43  fs/afs/rxrpc.c
  99. +1 -1  fs/nfsd/nfs4acl.c
  100. +13 -2  fs/nfsd/nfs4state.c

+ 6 - 6
Documentation/DocBook/drm.tmpl

@@ -79,7 +79,7 @@
  <partintro>
    <para>
      This first part of the DRM Developer's Guide documents core DRM code,
-      helper libraries for writting drivers and generic userspace interfaces
+      helper libraries for writing drivers and generic userspace interfaces
      exposed by DRM drivers.
    </para>
  </partintro>
@@ -459,7 +459,7 @@ char *date;</synopsis>
      providing a solution to every graphics memory-related problems, GEM
      identified common code between drivers and created a support library to
      share it. GEM has simpler initialization and execution requirements than
-      TTM, but has no video RAM management capabitilies and is thus limited to
+      TTM, but has no video RAM management capabilities and is thus limited to
      UMA devices.
    </para>
    <sect2>
@@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev,
 	    vice versa. Drivers must use the kernel dma-buf buffer sharing framework
 	    to manage the PRIME file descriptors. Similar to the mode setting
 	    API PRIME is agnostic to the underlying buffer object manager, as
-	    long as handles are 32bit unsinged integers.
+	    long as handles are 32bit unsigned integers.
 	  </para>
 	  <para>
 	    While non-GEM drivers must implement the operations themselves, GEM
@@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev)
      first create properties and then create and associate individual instances
      of those properties to objects. A property can be instantiated multiple
      times and associated with different objects. Values are stored in property
-      instances, and all other property information are stored in the propery
+      instances, and all other property information are stored in the property
      and shared between all instances of the property.
    </para>
    <para>
@@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis>
  <sect1>
    <title>Legacy Support Code</title>
    <para>
-      The section very brievely covers some of the old legacy support code which
+      The section very briefly covers some of the old legacy support code which
      is only used by old DRM drivers which have done a so-called shadow-attach
      to the underlying device instead of registering as a real driver. This
-      also includes some of the old generic buffer mangement and command
+      also includes some of the old generic buffer management and command
      submission code. Do not use any of this in new and modern drivers.
    </para>


+ 1 - 1
Documentation/DocBook/media/Makefile

@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
 #
 
 install_media_images = \
-	$(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+	$(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
 
 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
 	$(Q)base64 -d $< >$@

+ 1 - 1
Documentation/devicetree/bindings/net/mdio-gpio.txt

@@ -14,7 +14,7 @@ node.
 Example:
 
 aliases {
-	mdio-gpio0 = <&mdio0>;
+	mdio-gpio0 = &mdio0;
 };
 
 mdio0: mdio {

+ 5 - 10
Documentation/email-clients.txt

@@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this:
 
 - Edit your Thunderbird config settings so that it won't use format=flowed.
   Go to "edit->preferences->advanced->config editor" to bring up the
-  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
-  "false".
+  thunderbird's registry editor.
 
-- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
+- Set "mailnews.send_plaintext_flowed" to "false"
 
-- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
+- Set "mailnews.wraplength" from "72" to "0"
 
-- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
+- "View" > "Message Body As" > "Plain Text"
 
-- Install the "toggle wordwrap" extension.  Download the file from:
-    https://addons.mozilla.org/thunderbird/addon/2351/
-  Then go to "tools->add ons", select "install" at the bottom of the screen,
-  and browse to where you saved the .xul file.  This adds an "Enable
-  Wordwrap" entry under the Options menu of the message composer.
+- "View" > "Character Encoding" > "Unicode (UTF-8)"
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)

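For reference, the first two steps above correspond to prefs.js entries of
roughly the following form (an illustration, not part of the patch; profile
paths and exact persistence details vary per installation):

// Hypothetical prefs.js lines written by the config editor:
user_pref("mailnews.send_plaintext_flowed", false);
user_pref("mailnews.wraplength", 0);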
+ 3 - 2
Documentation/filesystems/proc.txt

@@ -1245,8 +1245,9 @@ second).  The meanings of the columns are as follows, from left to right:
 
 The "intr" line gives counts of interrupts  serviced since boot time, for each
 of the  possible system interrupts.   The first  column  is the  total of  all
-interrupts serviced; each  subsequent column is the  total for that particular
-interrupt.
+interrupts serviced  including  unnumbered  architecture specific  interrupts;
+each  subsequent column is the  total for that particular numbered interrupt.
+Unnumbered interrupts are not shown, only summed into the total.
 
 The "ctxt" line gives the total number of context switches across all CPUs.
 

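The reworded paragraph above can be checked from userspace: the first field
of the "intr" line is the grand total, which may exceed the sum of the
numbered columns because unnumbered architecture-specific interrupts are
folded into it. A minimal sketch that reads just that total (assuming a
standard /proc mount):

#include <stdio.h>

int main(void)
{
	char line[65536];
	unsigned long long total;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* first field of "intr" is the total of all interrupts
		 * serviced, including unnumbered ones */
		if (sscanf(line, "intr %llu", &total) == 1) {
			printf("total interrupts: %llu\n", total);
			break;
		}
	}
	fclose(f);
	return 0;
}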
+ 14 - 0
Documentation/hwmon/sysfs-interface

@@ -327,6 +327,13 @@ temp[1-*]_max_hyst
 		from the max value.
 		RW
 
+temp[1-*]_min_hyst
+		Temperature hysteresis value for min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the min value.
+		RW
+
 temp[1-*]_input Temperature input value.
 		Unit: millidegree Celsius
 		RO
@@ -362,6 +369,13 @@ temp[1-*]_lcrit	Temperature critical min value, typically lower than
 		Unit: millidegree Celsius
 		RW
 
+temp[1-*]_lcrit_hyst
+		Temperature hysteresis value for critical min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the critical min value.
+		RW
+
 temp[1-*]_offset
 		Temperature offset which is added to the temperature reading
 		by the chip.

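Because both new *_hyst attributes are documented as absolute values in
millidegrees Celsius, reading them is a plain integer fetch. A minimal
sketch (the hwmon0/temp1 path is an assumption; device numbering varies
per system):

#include <stdio.h>

int main(void)
{
	long mdeg;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_min_hyst", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &mdeg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* an absolute temperature, NOT a delta from temp1_min */
	printf("min hysteresis: %ld millidegrees Celsius\n", mdeg);
	return 0;
}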
+ 8 - 0
Documentation/java.txt

@@ -188,6 +188,9 @@ shift
 #define CP_METHODREF 10
 #define CP_INTERFACEMETHODREF 11
 #define CP_NAMEANDTYPE 12
+#define CP_METHODHANDLE 15
+#define CP_METHODTYPE 16
+#define CP_INVOKEDYNAMIC 18
 
 /* Define some commonly used error messages */
 
@@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
 		break;
 	case CP_CLASS:
 	case CP_STRING:
+	case CP_METHODTYPE:
 		seekerr = fseek(classfile, 2, SEEK_CUR);
 		break;
+	case CP_METHODHANDLE:
+		seekerr = fseek(classfile, 3, SEEK_CUR);
+		break;
 	case CP_INTEGER:
 	case CP_FLOAT:
 	case CP_FIELDREF:
 	case CP_METHODREF:
 	case CP_INTERFACEMETHODREF:
 	case CP_NAMEANDTYPE:
+	case CP_INVOKEDYNAMIC:
 		seekerr = fseek(classfile, 4, SEEK_CUR);
 		break;
 	case CP_LONG:

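The new seek distances follow from the JVM class-file format: after the
one-byte tag, CONSTANT_MethodHandle carries a one-byte reference kind plus
a two-byte index, CONSTANT_MethodType a two-byte descriptor index, and
CONSTANT_InvokeDynamic two two-byte indices. As a compact cross-check (a
sketch, not part of the patch):

/* bytes to skip after the tag, per the JVM specification */
static const struct { int tag; int skip; } new_cp_sizes[] = {
	{ 15, 3 },	/* CP_METHODHANDLE: u1 kind + u2 index */
	{ 16, 2 },	/* CP_METHODTYPE: u2 descriptor index */
	{ 18, 4 },	/* CP_INVOKEDYNAMIC: two u2 indices */
};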
+ 1 - 1
Documentation/networking/filter.txt

@@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table:
   mark                                  skb->mark
   queue                                 skb->queue_mapping
   hatype                                skb->dev->type
-  rxhash                                skb->rxhash
+  rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
   vlan_tci                              vlan_tx_tag_get(skb)
   vlan_pr                               vlan_tx_tag_present(skb)

+ 1 - 1
Documentation/networking/packet_mmap.txt

@@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets.
 
 Currently implemented fanout policies are:
 
-  - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+  - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash
   - PACKET_FANOUT_LB: schedule to socket by round-robin
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection

+ 3 - 3
MAINTAINERS

@@ -537,7 +537,7 @@ L:	linux-alpha@vger.kernel.org
 F:	arch/alpha/
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:	Vince Bridgers <vbridgers2013@gmail.com
+M:	Vince Bridgers <vbridgers2013@gmail.com>
 L:	netdev@vger.kernel.org
 L:	nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:	Maintained
@@ -6514,10 +6514,10 @@ T:	git git://openrisc.net/~jonas/linux
 F:	arch/openrisc/
 
 OPENVSWITCH
-M:	Jesse Gross <jesse@nicira.com>
+M:	Pravin Shelar <pshelar@nicira.com>
 L:	dev@openvswitch.org
 W:	http://openvswitch.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:	Maintained
 F:	net/openvswitch/
 

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*

+ 1 - 1
arch/parisc/kernel/syscall_table.S

@@ -432,7 +432,7 @@
 	ENTRY_SAME(sched_setattr)
 	ENTRY_SAME(sched_getattr)	/* 335 */
 	ENTRY_COMP(utimes)
-	ENTRY_COMP(renameat2)
+	ENTRY_SAME(renameat2)
 
 	/* Nothing yet */
 

+ 1 - 1
arch/s390/net/bpf_jit_comp.c

@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
 		return NULL;
 	memset(header, 0, sz);
 	header->pages = sz / PAGE_SIZE;
-	hole = sz - (bpfsize + sizeof(*header));
+	hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 	/* Insert random number of illegal instructions before BPF code
 	 * and make sure the first instruction starts at an even address.
 	 */

+ 4 - 2
arch/sparc/include/asm/pgtable_64.h

@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define	TLBTEMP_BASE		_AC(0x0000000006000000,UL)
-#define	TSBMAP_BASE		_AC(0x0000000008000000,UL)
+#define	TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
+#define	TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
 #define MODULES_VADDR		_AC(0x0000000010000000,UL)
 #define MODULES_LEN		_AC(0x00000000e0000000,UL)
 #define MODULES_END		_AC(0x00000000f0000000,UL)

+ 1 - 1
arch/sparc/kernel/sysfs.c

@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
 			size_t count)
 {
 	unsigned long val, err;
-	int ret = sscanf(buf, "%ld", &val);
+	int ret = sscanf(buf, "%lu", &val);
 
 	if (ret != 1)
 		return -EINVAL;

+ 1 - 0
arch/sparc/lib/NG2memcpy.S

@@ -236,6 +236,7 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	 */
 	VISEntryHalf
 
+	membar		#Sync
 	alignaddr	%o1, %g0, %g0
 
 	add		%o1, (64 - 1), %o4

+ 1 - 15
arch/sparc/mm/fault_64.c

@@ -281,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
 	show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-							 unsigned long addr)
-{
-	static int times;
-
-	if (times++ < 10)
-		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-		       "reports 64-bit fault address [%lx]\n",
-		       current->comm, current->pid, addr);
-	show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
 	enum ctx_state prev_state = exception_enter();
@@ -322,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 				goto intr_or_no_mm;
 			}
 		}
-		if (unlikely((address >> 32) != 0)) {
-			bogus_32bit_fault_address(regs, address);
+		if (unlikely((address >> 32) != 0))
 			goto intr_or_no_mm;
-		}
 	}
 
 	if (regs->tstate & TSTATE_PRIV) {

+ 13 - 1
arch/sparc/mm/tsb.c

@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	mm->context.tsb_block[tsb_idx].tsb_nentries =
 		tsb_bytes / sizeof(struct tsb);
 
-	base = TSBMAP_BASE;
+	switch (tsb_idx) {
+	case MM_TSB_BASE:
+		base = TSBMAP_8K_BASE;
+		break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	case MM_TSB_HUGE:
+		base = TSBMAP_4M_BASE;
+		break;
+#endif
+	default:
+		BUG();
+	}
+
 	tte = pgprot_val(PAGE_KERNEL_LOCKED);
 	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

+ 0 - 1
arch/x86/kernel/cpu/perf_event_intel.c

@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
 	EVENT_CONSTRAINT_END
 };

+ 1 - 1
arch/x86/net/bpf_jit_comp.c

@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
 	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
 
 	header->pages = sz / PAGE_SIZE;
-	hole = sz - (proglen + sizeof(*header));
+	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 
 	/* insert a random number of int3 instructions before BPF code */
 	*image_ptr = &header->image[prandom_u32() % hole];

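This is the same hardening as the s390 change above: capping the hole at
PAGE_SIZE - sizeof(*header) guarantees that the randomly chosen start
offset keeps the program beginning inside the first page of the
allocation. A standalone sketch of the arithmetic (not kernel code; names
and sizes here are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

struct header { unsigned int pages; uint8_t image[]; };

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t proglen = 9000;	/* example JITed program length */
	size_t sz = (proglen + sizeof(struct header) + 128 + PAGE_SIZE - 1)
			& ~((size_t)PAGE_SIZE - 1);
	/* before the fix: hole = sz - (proglen + sizeof(struct header)); */
	size_t hole = min_sz(sz - (proglen + sizeof(struct header)),
			     PAGE_SIZE - sizeof(struct header));
	size_t start = (size_t)rand() % hole;

	printf("sz=%zu hole=%zu start=%zu (start always < one page)\n",
	       sz, hole, start);
	return 0;
}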
+ 24 - 4
drivers/acpi/bus.c

@@ -132,6 +132,21 @@ void acpi_bus_private_data_handler(acpi_handle handle,
 }
 EXPORT_SYMBOL(acpi_bus_private_data_handler);
 
+int acpi_bus_attach_private_data(acpi_handle handle, void *data)
+{
+	acpi_status status;
+
+	status = acpi_attach_data(handle,
+			acpi_bus_private_data_handler, data);
+	if (ACPI_FAILURE(status)) {
+		acpi_handle_debug(handle, "Error attaching device data\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_attach_private_data);
+
 int acpi_bus_get_private_data(acpi_handle handle, void **data)
 {
 	acpi_status status;
@@ -140,15 +155,20 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
 		return -EINVAL;
 
 	status = acpi_get_data(handle, acpi_bus_private_data_handler, data);
-	if (ACPI_FAILURE(status) || !*data) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
-				handle));
+	if (ACPI_FAILURE(status)) {
+		acpi_handle_debug(handle, "No context for object\n");
 		return -ENODEV;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL(acpi_bus_get_private_data);
+EXPORT_SYMBOL_GPL(acpi_bus_get_private_data);
+
+void acpi_bus_detach_private_data(acpi_handle handle)
+{
+	acpi_detach_data(handle, acpi_bus_private_data_handler);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data);
 
 void acpi_bus_no_hotplug(acpi_handle handle)
 {

+ 4 - 7
drivers/acpi/thermal.c

@@ -925,13 +925,10 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
 	if (result)
 		return result;
 
-	status = acpi_attach_data(tz->device->handle,
-				  acpi_bus_private_data_handler,
-				  tz->thermal_zone);
-	if (ACPI_FAILURE(status)) {
-		pr_err(PREFIX "Error attaching device data\n");
+	status =  acpi_bus_attach_private_data(tz->device->handle,
+					       tz->thermal_zone);
+	if (ACPI_FAILURE(status))
 		return -ENODEV;
-	}
 
 	tz->tz_enabled = 1;
 
@@ -946,7 +943,7 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
 	sysfs_remove_link(&tz->thermal_zone->device.kobj, "device");
 	thermal_zone_device_unregister(tz->thermal_zone);
 	tz->thermal_zone = NULL;
-	acpi_detach_data(tz->device->handle, acpi_bus_private_data_handler);
+	acpi_bus_detach_private_data(tz->device->handle);
 }
 
 

+ 2 - 0
drivers/dma/dmaengine.c

@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
 			       DMA_BIDIRECTIONAL);
 	}
+	cnt = unmap->map_cnt;
 	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
 }
 
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 	memset(unmap, 0, sizeof(*unmap));
 	kref_init(&unmap->kref);
 	unmap->dev = dev;
+	unmap->map_cnt = nr;
 
 	return unmap;
 }

+ 3 - 5
drivers/dma/mv_xor.c

@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-	u32 activation;
-
 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-	activation = readl_relaxed(XOR_ACTIVATION(chan));
-	activation |= 0x1;
-	writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+	/* writel ensures all descriptors are flushed before activation */
+	writel(BIT(0), XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)

+ 1 - 1
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c

@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
 	}
 
 	if (outp == 8)
-		return false;
+		return conf;
 
 	data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
 	if (data == 0x0000)

+ 1 - 0
drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c

@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
 		case 0x00: return 2;
 		case 0x19: return 1;
 		case 0x1c: return 0;
+		case 0x1e: return 2;
 		default:
 			break;
 		}

+ 4 - 2
drivers/gpu/drm/radeon/radeon.h

@@ -1642,6 +1642,7 @@ struct radeon_vce {
 	unsigned		fb_version;
 	atomic_t		handles[RADEON_MAX_VCE_HANDLES];
 	struct drm_file		*filp[RADEON_MAX_VCE_HANDLES];
+	unsigned		img_size[RADEON_MAX_VCE_HANDLES];
 	struct delayed_work	idle_work;
 };
 
@@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
 			       uint32_t handle, struct radeon_fence **fence);
 void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
 void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
 int radeon_vce_cs_parse(struct radeon_cs_parser *p);
 bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
 			       struct radeon_ring *ring,
@@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
 #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
 #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+			     (rdev->family == CHIP_MULLINS))
 
 #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
 			      (rdev->ddev->pdev->device == 0x6850) || \

+ 14 - 0
drivers/gpu/drm/radeon/radeon_bios.c

@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 		}
 	}
 
+	if (!found) {
+		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+			dhandle = ACPI_HANDLE(&pdev->dev);
+			if (!dhandle)
+				continue;
+
+			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+			if (!ACPI_FAILURE(status)) {
+				found = true;
+				break;
+			}
+		}
+	}
+
 	if (!found)
 		return false;
 

+ 1 - 1
drivers/gpu/drm/radeon/radeon_display.c

@@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 
 	/* avoid high jitter with small fractional dividers */
 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
-		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
+		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
 		if (fb_div < fb_div_min) {
 			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
 			fb_div *= tmp;

+ 29 - 26
drivers/gpu/drm/radeon/radeon_kms.c

@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return r;
 		}
 
-		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-		if (r) {
-			radeon_vm_fini(rdev, &fpriv->vm);
-			kfree(fpriv);
-			return r;
-		}
+		if (rdev->accel_working) {
+			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+			if (r) {
+				radeon_vm_fini(rdev, &fpriv->vm);
+				kfree(fpriv);
+				return r;
+			}
 
-		/* map the ib pool buffer read only into
-		 * virtual address space */
-		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-					 rdev->ring_tmp_bo.bo);
-		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-					  RADEON_VM_PAGE_READABLE |
-					  RADEON_VM_PAGE_SNOOPED);
+			/* map the ib pool buffer read only into
+			 * virtual address space */
+			bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+						 rdev->ring_tmp_bo.bo);
+			r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+						  RADEON_VM_PAGE_READABLE |
+						  RADEON_VM_PAGE_SNOOPED);
 
-		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-		if (r) {
-			radeon_vm_fini(rdev, &fpriv->vm);
-			kfree(fpriv);
-			return r;
+			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+			if (r) {
+				radeon_vm_fini(rdev, &fpriv->vm);
+				kfree(fpriv);
+				return r;
+			}
 		}
-
 		file_priv->driver_priv = fpriv;
 	}
 
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 		struct radeon_bo_va *bo_va;
 		int r;
 
-		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-		if (!r) {
-			bo_va = radeon_vm_bo_find(&fpriv->vm,
-						  rdev->ring_tmp_bo.bo);
-			if (bo_va)
-				radeon_vm_bo_rmv(rdev, bo_va);
-			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		if (rdev->accel_working) {
+			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+			if (!r) {
+				bo_va = radeon_vm_bo_find(&fpriv->vm,
+							  rdev->ring_tmp_bo.bo);
+				if (bo_va)
+					radeon_vm_bo_rmv(rdev, bo_va);
+				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+			}
 		}
 
 		radeon_vm_fini(rdev, &fpriv->vm);

+ 24 - 16
drivers/gpu/drm/radeon/radeon_object.c

@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			 * into account. We don't want to disallow buffer moves
 			 * completely.
 			 */
-			if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+			if ((lobj->alt_domain & current_domain) != 0 &&
 			    (domain & current_domain) == 0 && /* will be moved */
 			    bytes_moved > bytes_moved_threshold) {
 				/* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
 	rdev = rbo->rdev;
-	if (bo->mem.mem_type == TTM_PL_VRAM) {
-		size = bo->mem.num_pages << PAGE_SHIFT;
-		offset = bo->mem.start << PAGE_SHIFT;
-		if ((offset + size) > rdev->mc.visible_vram_size) {
-			/* hurrah the memory is not visible ! */
-			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-			r = ttm_bo_validate(bo, &rbo->placement, false, false);
-			if (unlikely(r != 0))
-				return r;
-			offset = bo->mem.start << PAGE_SHIFT;
-			/* this should not happen */
-			if ((offset + size) > rdev->mc.visible_vram_size)
-				return -EINVAL;
-		}
+	if (bo->mem.mem_type != TTM_PL_VRAM)
+		return 0;
+
+	size = bo->mem.num_pages << PAGE_SHIFT;
+	offset = bo->mem.start << PAGE_SHIFT;
+	if ((offset + size) <= rdev->mc.visible_vram_size)
+		return 0;
+
+	/* hurrah the memory is not visible ! */
+	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	r = ttm_bo_validate(bo, &rbo->placement, false, false);
+	if (unlikely(r == -ENOMEM)) {
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		return ttm_bo_validate(bo, &rbo->placement, false, false);
+	} else if (unlikely(r != 0)) {
+		return r;
 	}
+
+	offset = bo->mem.start << PAGE_SHIFT;
+	/* this should never happen */
+	if ((offset + size) > rdev->mc.visible_vram_size)
+		return -EINVAL;
+
 	return 0;
 }
 

+ 41 - 1
drivers/gpu/drm/radeon/radeon_pm.c

@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
+	/* Can't set profile when the card is off */
+	if  ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
 	mutex_lock(&rdev->pm.mutex);
 	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 		if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
+	/* Can't set method when the card is off */
+	if  ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+		count = -EINVAL;
+		goto fail;
+	}
+
 	/* we don't support the legacy modes with dpm */
 	if (rdev->pm.pm_method == PM_METHOD_DPM) {
 		count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
 	struct radeon_device *rdev = ddev->dev_private;
 	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
+	if  ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return snprintf(buf, PAGE_SIZE, "off\n");
+
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
 			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
+	/* Can't set dpm state when the card is off */
+	if  ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
 	mutex_lock(&rdev->pm.mutex);
 	if (strncmp("battery", buf, strlen("battery")) == 0)
 		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
 	struct radeon_device *rdev = ddev->dev_private;
 	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 
+	if  ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return snprintf(buf, PAGE_SIZE, "off\n");
+
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
 			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
 	enum radeon_dpm_forced_level level;
 	int ret = 0;
 
+	/* Can't force performance level when the card is off */
+	if  ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
 	mutex_lock(&rdev->pm.mutex);
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
 				      char *buf)
 {
 	struct radeon_device *rdev = dev_get_drvdata(dev);
+	struct drm_device *ddev = rdev->ddev;
 	int temp;
 
+	/* Can't get temperature when the card is off */
+	if  ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
 	if (rdev->asic->pm.get_temperature)
 		temp = radeon_get_temperature(rdev);
 	else
@@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
+	struct drm_device *ddev = rdev->ddev;
 
-	if (rdev->pm.dpm_enabled) {
+	if  ((rdev->flags & RADEON_IS_PX) &&
+	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+		seq_printf(m, "PX asic powered off\n");
+	} else if (rdev->pm.dpm_enabled) {
 		mutex_lock(&rdev->pm.mutex);
 		if (rdev->asic->dpm.debugfs_print_current_performance_level)
 			radeon_dpm_debugfs_print_current_performance_level(rdev, m);

+ 100 - 30
drivers/gpu/drm/radeon/radeon_vce.c

@@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
  * @p: parser context
  * @lo: address of lower dword
  * @hi: address of higher dword
+ * @size: size of checker for relocation buffer
  *
  * Patch relocation inside command stream with real buffer address
  */
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+			unsigned size)
 {
 	struct radeon_cs_chunk *relocs_chunk;
-	uint64_t offset;
+	struct radeon_cs_reloc *reloc;
+	uint64_t start, end, offset;
 	unsigned idx;
 
 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -462,14 +465,59 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
 		return -EINVAL;
 	}
 
-	offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+	reloc = p->relocs_ptr[(idx / 4)];
+	start = reloc->gpu_offset;
+	end = start + radeon_bo_size(reloc->robj);
+	start += offset;
 
-        p->ib.ptr[lo] = offset & 0xFFFFFFFF;
-        p->ib.ptr[hi] = offset >> 32;
+	p->ib.ptr[lo] = start & 0xFFFFFFFF;
+	p->ib.ptr[hi] = start >> 32;
+
+	if (end <= start) {
+		DRM_ERROR("invalid reloc offset %llX!\n", offset);
+		return -EINVAL;
+	}
+	if ((end - start) < size) {
+		DRM_ERROR("buffer to small (%d / %d)!\n",
+			(unsigned)(end - start), size);
+		return -EINVAL;
+	}
 
 	return 0;
 }
 
+/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and return the found session index or -EINVAL
+ * we we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+	unsigned i;
+
+	/* validate the handle */
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+			return i;
+	}
+
+	/* handle not found try to alloc a new one */
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+			p->rdev->vce.filp[i] = p->filp;
+			p->rdev->vce.img_size[i] = 0;
+			return i;
+		}
+	}
+
+	DRM_ERROR("No more free VCE handles!\n");
+	return -EINVAL;
+}
+
 /**
  * radeon_vce_cs_parse - parse and validate the command stream
  *
@@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
  */
 int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
-	uint32_t handle = 0;
-	bool destroy = false;
+	int session_idx = -1;
+	bool destroyed = false;
+	uint32_t tmp, handle = 0;
+	uint32_t *size = &tmp;
 	int i, r;
 
 	while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                 	return -EINVAL;
 		}
 
+		if (destroyed) {
+			DRM_ERROR("No other command allowed after destroy!\n");
+			return -EINVAL;
+		}
+
 		switch (cmd) {
 		case 0x00000001: // session
 			handle = radeon_get_ib_value(p, p->idx + 2);
+			session_idx = radeon_vce_validate_handle(p, handle);
+			if (session_idx < 0)
+				return session_idx;
+			size = &p->rdev->vce.img_size[session_idx];
 			break;
 
 		case 0x00000002: // task info
+			break;
+
 		case 0x01000001: // create
+			*size = radeon_get_ib_value(p, p->idx + 8) *
+				radeon_get_ib_value(p, p->idx + 10) *
+				8 * 3 / 2;
+			break;
+
 		case 0x04000001: // config extension
 		case 0x04000002: // pic control
 		case 0x04000005: // rate control
@@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 			break;
 
 		case 0x03000001: // encode
-			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+						*size);
 			if (r)
 				return r;
 
-			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+						*size / 3);
 			if (r)
 				return r;
 			break;
 
 		case 0x02000001: // destroy
-			destroy = true;
+			destroyed = true;
 			break;
 
 		case 0x05000001: // context buffer
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						*size * 2);
+			if (r)
+				return r;
+			break;
+
 		case 0x05000004: // video bitstream buffer
+			tmp = radeon_get_ib_value(p, p->idx + 4);
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						tmp);
+			if (r)
+				return r;
+			break;
+
 		case 0x05000005: // feedback buffer
-			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						4096);
 			if (r)
 				return r;
 			break;
@@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 			return -EINVAL;
 		}
 
+		if (session_idx == -1) {
+			DRM_ERROR("no session command at start of IB\n");
+			return -EINVAL;
+		}
+
 		p->idx += len / 4;
 	}
 
-	if (destroy) {
+	if (destroyed) {
 		/* IB contains a destroy msg, free the handle */
 		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
-		return 0;
-        }
-
-	/* create or encode, validate the handle */
-	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-		if (atomic_read(&p->rdev->vce.handles[i]) == handle)
-			return 0;
 	}
 
-	/* handle not found try to alloc a new one */
-	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
-			p->rdev->vce.filp[i] = p->filp;
-			return 0;
-		}
-	}
-
-	DRM_ERROR("No more free VCE handles!\n");
-	return -EINVAL;
+	return 0;
 }
 
 /**

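The handle bookkeeping that this patch factors into
radeon_vce_validate_handle() is a small lock-free slot allocator: reuse
the slot already holding the handle, otherwise claim a zero slot with a
compare-and-exchange. A userspace analogue using C11 atomics (a sketch
under that assumption, not driver code):

#include <stdatomic.h>
#include <stdint.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES];

/* returns the session index for handle, or -1 if no slot is free */
int validate_handle(uint32_t handle)
{
	uint32_t expected;
	int i;

	/* existing session? */
	for (i = 0; i < MAX_HANDLES; ++i)
		if (atomic_load(&handles[i]) == handle)
			return i;

	/* claim a free (zero) slot atomically */
	for (i = 0; i < MAX_HANDLES; ++i) {
		expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected,
						   handle))
			return i;
	}
	return -1;
}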
+ 1 - 1
drivers/gpu/drm/radeon/radeon_vm.c

@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 	ndw = 64;
 
 	/* assume the worst case */
-	ndw += vm->max_pde_used * 12;
+	ndw += vm->max_pde_used * 16;
 
 	/* update too big for an IB */
 	if (ndw > 0xfffff)

+ 2 - 2
drivers/gpu/drm/radeon/sid.h

@@ -107,8 +107,8 @@
 #define		SPLL_CHG_STATUS				(1 << 1)
 #define	SPLL_CNTL_MODE					0x618
 #define		SPLL_SW_DIR_CONTROL			(1 << 0)
-#	define SPLL_REFCLK_SEL(x)			((x) << 8)
-#	define SPLL_REFCLK_SEL_MASK			0xFF00
+#	define SPLL_REFCLK_SEL(x)			((x) << 26)
+#	define SPLL_REFCLK_SEL_MASK			(3 << 26)
 
 #define	CG_SPLL_SPREAD_SPECTRUM				0x620
 #define		SSEN					(1 << 0)

+ 1 - 1
drivers/hwmon/Kconfig

@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
 
 config SENSORS_NTC_THERMISTOR
 	tristate "NTC thermistor support"
-	depends on (!OF && !IIO) || (OF && IIO)
+	depends on !OF || IIO=n || IIO
 	help
 	  This driver supports NTC thermistors sensor reading and its
 	  interpretation. The driver can also monitor the temperature and

+ 9 - 6
drivers/hwmon/ntc_thermistor.c

@@ -44,6 +44,7 @@ struct ntc_compensation {
 	unsigned int	ohm;
 };
 
+/* Order matters, ntc_match references the entries by index */
 static const struct platform_device_id ntc_thermistor_id[] = {
 	{ "ncp15wb473", TYPE_NCPXXWB473 },
 	{ "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
 	char name[PLATFORM_NAME_SIZE];
 };
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
 static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 {
 	struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 
 static const struct of_device_id ntc_match[] = {
 	{ .compatible = "ntc,ncp15wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[0] },
 	{ .compatible = "ntc,ncp18wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[1] },
 	{ .compatible = "ntc,ncp21wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[2] },
 	{ .compatible = "ntc,ncp03wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[3] },
 	{ .compatible = "ntc,ncp15wl333",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
+		.data = &ntc_thermistor_id[4] },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
 	return NULL;
 }
 
+#define ntc_match	NULL
+
 static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
 { }
 #endif

+ 64 - 3
drivers/infiniband/hw/mlx4/main.c

@@ -48,6 +48,7 @@
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
 }
 #endif
 
+#define MLX4_IB_INVALID_MAC	((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+			       struct net_device *dev,
+			       int port)
+{
+	u64 new_smac = 0;
+	u64 release_mac = MLX4_IB_INVALID_MAC;
+	struct mlx4_ib_qp *qp;
+
+	read_lock(&dev_base_lock);
+	new_smac = mlx4_mac_to_u64(dev->dev_addr);
+	read_unlock(&dev_base_lock);
+
+	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+	qp = ibdev->qp1_proxy[port - 1];
+	if (qp) {
+		int new_smac_index;
+		u64 old_smac = qp->pri.smac;
+		struct mlx4_update_qp_params update_params;
+
+		if (new_smac == old_smac)
+			goto unlock;
+
+		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+		if (new_smac_index < 0)
+			goto unlock;
+
+		update_params.smac_index = new_smac_index;
+		if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+				   &update_params)) {
+			release_mac = new_smac;
+			goto unlock;
+		}
+
+		qp->pri.smac = new_smac;
+		qp->pri.smac_index = new_smac_index;
+
+		release_mac = old_smac;
+	}
+
+unlock:
+	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+	if (release_mac != MLX4_IB_INVALID_MAC)
+		mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
 				 struct mlx4_ib_dev *ibdev, u8 port)
 {
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 	return 0;
 }
 
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+				 struct net_device *dev,
+				 unsigned long event)
+
 {
 	struct mlx4_ib_iboe *iboe;
+	int update_qps_port = -1;
 	int port;
 
 	iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
 		}
 		curr_master = iboe->masters[port - 1];
 
+		if (dev == iboe->netdevs[port - 1] &&
+		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+		     event == NETDEV_UP || event == NETDEV_CHANGE))
+			update_qps_port = port;
+
 		if (curr_netdev) {
 			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
 						IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
 	}
 
 	spin_unlock(&iboe->lock);
+
+	if (update_qps_port > 0)
+		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
 }
 
 static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
 		return NOTIFY_DONE;
 
 	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
-	mlx4_ib_scan_netdevs(ibdev);
+	mlx4_ib_scan_netdevs(ibdev, dev, event);
 
 	return NOTIFY_DONE;
 }
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		goto err_map;
 
 	for (i = 0; i < ibdev->num_ports; ++i) {
+		mutex_init(&ibdev->qp1_proxy_lock[i]);
 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
 						IB_LINK_LAYER_ETHERNET) {
 			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		for (i = 1 ; i <= ibdev->num_ports ; ++i)
 			reset_gid_table(ibdev, i);
 		rtnl_lock();
-		mlx4_ib_scan_netdevs(ibdev);
+		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
 		rtnl_unlock();
 		mlx4_ib_init_gid_table(ibdev);
 	}

+ 3 - 0
drivers/infiniband/hw/mlx4/mlx4_ib.h

@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
 	int steer_qpn_count;
 	int steer_qpn_base;
 	int steering_support;
+	struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
+	/* lock when destroying qp1_proxy and getting netdev events */
+	struct mutex		qp1_proxy_lock[MLX4_MAX_PORTS];
 };
 
 struct ib_event_work {

+ 8 - 0
drivers/infiniband/hw/mlx4/qp.c

@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
 	if (is_qp0(dev, mqp))
 		mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
+	if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+		dev->qp1_proxy[mqp->port - 1] = NULL;
+		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+	}
+
 	pd = get_pd(mqp);
 	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
 
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 				err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
 				if (err)
 					return -EINVAL;
+				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+					dev->qp1_proxy[qp->port - 1] = qp;
 			}
 		}
 	}

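Taken together, the three mlx4 hunks implement a publish/unpublish pattern: __mlx4_ib_modify_qp() publishes the proxy QP1 pointer, the netdev-event path reads it under qp1_proxy_lock, and mlx4_ib_destroy_qp() unpublishes it under the same lock before the QP memory goes away. A small userspace sketch of that discipline (struct qp and the helpers are invented for illustration, not mlx4 code):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct qp { int id; };

	static pthread_mutex_t proxy_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct qp *qp1_proxy;		/* the published pointer */

	/* event side: only touches the object while it is published */
	static void on_addr_change(void)
	{
		pthread_mutex_lock(&proxy_lock);
		if (qp1_proxy)
			printf("updating SMAC of qp %d\n", qp1_proxy->id);
		pthread_mutex_unlock(&proxy_lock);
	}

	/* destroy side: unpublish under the same lock, then free */
	static void destroy_qp(struct qp *qp)
	{
		pthread_mutex_lock(&proxy_lock);
		if (qp1_proxy == qp)
			qp1_proxy = NULL;
		pthread_mutex_unlock(&proxy_lock);
		free(qp);	/* no event handler can reach it anymore */
	}

	int main(void)
	{
		struct qp *qp = malloc(sizeof(*qp));
		qp->id = 1;
		qp1_proxy = qp;
		on_addr_change();
		destroy_qp(qp);
		on_addr_change();	/* safely sees NULL */
		return 0;
	}
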
+ 38 - 16
drivers/net/bonding/bond_alb.c

@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+				      bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
 	bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-	alb_send_learning_packets(bond->curr_active_slave, addr);
+	alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-			    u16 vid)
+			    __be16 vlan_proto, u16 vid)
 {
 	struct learning_pkt pkt;
 	struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
 	skb->dev = slave->dev;
 
 	if (vid) {
-		skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+		skb = vlan_put_tag(skb, vlan_proto, vid);
 		if (!skb) {
 			pr_err("%s: Error: failed to insert VLAN tag\n",
 			       slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
 	dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+				      bool strict_match)
 {
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 	struct net_device *upper;
 	struct list_head *iter;
 
 	/* send untagged */
-	alb_send_lp_vid(slave, mac_addr, 0);
+	alb_send_lp_vid(slave, mac_addr, 0, 0);
 
 	/* loop through vlans and send one packet for each */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-		if (upper->priv_flags & IFF_802_1Q_VLAN)
-			alb_send_lp_vid(slave, mac_addr,
-					vlan_dev_vlan_id(upper));
+		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+			if (strict_match &&
+			    ether_addr_equal_64bits(mac_addr,
+						    upper->dev_addr)) {
+				alb_send_lp_vid(slave, mac_addr,
+						vlan_dev_vlan_proto(upper),
+						vlan_dev_vlan_id(upper));
+			} else if (!strict_match) {
+				alb_send_lp_vid(slave, upper->dev_addr,
+						vlan_dev_vlan_proto(upper),
+						vlan_dev_vlan_id(upper));
+			}
+		}
 	}
 	rcu_read_unlock();
 }
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 
 	/* fasten the change in the switch */
 	if (SLAVE_IS_OK(slave1)) {
-		alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+		alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
 			 * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 	}
 
 	if (SLAVE_IS_OK(slave2)) {
-		alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+		alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
 			 * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
 
 	/* send learning packets */
 	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+		bool strict_match;
+
 		/* change of curr_active_slave involves swapping of mac addresses.
 		 * in order to avoid this swapping from happening while
 		 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
 		 */
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave_rcu(bond, slave, iter)
-			alb_send_learning_packets(slave, slave->dev->dev_addr);
+		bond_for_each_slave_rcu(bond, slave, iter) {
+			/* If updating current_active, use all currently
+			 * in-use MAC addresses (!strict_match).  Otherwise, only
+			 * use the MAC of the slave device.
+			 */
+			strict_match = (slave != bond->curr_active_slave);
+			alb_send_learning_packets(slave, slave->dev->dev_addr,
+						  strict_match);
+		}
 
 		read_unlock(&bond->curr_slave_lock);
 
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	} else {
 		/* set the new_slave to the bond mac address */
 		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-		alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+		alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+					  false);
 	}
 
 	write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
 		read_lock(&bond->lock);
-		alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+		alb_send_learning_packets(bond->curr_active_slave,
+					  bond_dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform clients mac address has changed */
 			rlb_req_update_slave_clients(bond, bond->curr_active_slave);

+ 65 - 69
drivers/net/bonding/bond_main.c

@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
 */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 			  __be32 dest_ip, __be32 src_ip,
-			  struct bond_vlan_tag *inner,
-			  struct bond_vlan_tag *outer)
+			  struct bond_vlan_tag *tags)
 {
 	struct sk_buff *skb;
+	int i;
 
 	pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
 		 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 		net_err_ratelimited("ARP packet allocation failed\n");
 		return;
 	}
-	if (outer->vlan_id) {
-		if (inner->vlan_id) {
-			pr_debug("inner tag: proto %X vid %X\n",
-				 ntohs(inner->vlan_proto), inner->vlan_id);
-			skb = __vlan_put_tag(skb, inner->vlan_proto,
-					     inner->vlan_id);
-			if (!skb) {
-				net_err_ratelimited("failed to insert inner VLAN tag\n");
-				return;
-			}
-		}
 
-		pr_debug("outer reg: proto %X vid %X\n",
-			 ntohs(outer->vlan_proto), outer->vlan_id);
-		skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+	/* Go through all the tags backwards and add them to the packet */
+	for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+		if (!tags[i].vlan_id)
+			continue;
+
+		pr_debug("inner tag: proto %X vid %X\n",
+			 ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+		skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+				     tags[i].vlan_id);
+		if (!skb) {
+			net_err_ratelimited("failed to insert inner VLAN tag\n");
+			return;
+		}
+	}
+	/* Set the outer tag */
+	if (tags[0].vlan_id) {
+		pr_debug("outer tag: proto %X vid %X\n",
+			 ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+		skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
 		if (!skb) {
 			net_err_ratelimited("failed to insert outer VLAN tag\n");
 			return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 	arp_xmit(skb);
 }
 
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+				    struct net_device *end_dev,
+				    struct bond_vlan_tag *tags)
+{
+	struct net_device *upper;
+	struct list_head  *iter;
+	int  idx;
+
+	if (start_dev == end_dev)
+		return true;
+
+	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+		if (bond_verify_device_path(upper, end_dev, tags)) {
+			if (is_vlan_dev(upper)) {
+				idx = vlan_get_encap_level(upper);
+				if (idx >= BOND_MAX_VLAN_ENCAP)
+					return false;
+
+				tags[idx].vlan_proto =
+						    vlan_dev_vlan_proto(upper);
+				tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+			}
+			return true;
+		}
+	}
+
+	return false;
+}
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-	struct net_device *upper, *vlan_upper;
-	struct list_head *iter, *vlan_iter;
 	struct rtable *rt;
-	struct bond_vlan_tag inner, outer;
+	struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
 	__be32 *targets = bond->params.arp_targets, addr;
 	int i;
+	bool ret;
 
 	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
 		pr_debug("basa: target %pI4\n", &targets[i]);
-		inner.vlan_proto = 0;
-		inner.vlan_id = 0;
-		outer.vlan_proto = 0;
-		outer.vlan_id = 0;
+		memset(tags, 0, sizeof(tags));
 
 		/* Find out through which dev should the packet go */
 		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 				net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
 						     bond->dev->name,
 						     &targets[i]);
-			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+				      0, tags);
 			continue;
 		}
 
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 			goto found;
 
 		rcu_read_lock();
-		/* first we search only for vlan devices. for every vlan
-		 * found we verify its upper dev list, searching for the
-		 * rt->dst.dev. If found we save the tag of the vlan and
-		 * proceed to send the packet.
-		 */
-		netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-						  vlan_iter) {
-			if (!is_vlan_dev(vlan_upper))
-				continue;
-
-			if (vlan_upper == rt->dst.dev) {
-				outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-				outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-				rcu_read_unlock();
-				goto found;
-			}
-			netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-							  iter) {
-				if (upper == rt->dst.dev) {
-					/* If the upper dev is a vlan dev too,
-					 *  set the vlan tag to inner tag.
-					 */
-					if (is_vlan_dev(upper)) {
-						inner.vlan_proto = vlan_dev_vlan_proto(upper);
-						inner.vlan_id = vlan_dev_vlan_id(upper);
-					}
-					outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-					outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-					rcu_read_unlock();
-					goto found;
-				}
-			}
-		}
-
-		/* if the device we're looking for is not on top of any of
-		 * our upper vlans, then just search for any dev that
-		 * matches, and in case it's a vlan - save the id
-		 */
-		netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-			if (upper == rt->dst.dev) {
-				rcu_read_unlock();
-				goto found;
-			}
-		}
+		ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
 		rcu_read_unlock();
 
+		if (ret)
+			goto found;
+
 		/* Not our device - skip */
 		pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
 			 bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
 		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
 		ip_rt_put(rt);
 		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-			      addr, &inner, &outer);
+			      addr, tags);
 	}
 }
 

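bond_verify_device_path() recurses from the bond device up through the stacked uppers and, on the way back down the successful branch, records each VLAN's tag at its encapsulation level, so tags[0] always holds the outermost tag. A standalone model of that walk (struct dev and its fields are invented for illustration; the kernel uses net_device and the netdev upper lists):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_VLAN_ENCAP 2

	struct tag { unsigned short proto, vid; };

	/* toy device: at most two uppers; vlan devices carry a vid */
	struct dev {
		const char *name;
		int vid;		/* 0 = not a vlan device */
		int encap_level;	/* nesting depth of the vlan */
		struct dev *upper[2];
	};

	/* mirrors the recursive walk: descend first, record tags on the
	 * way back up so tags[0] ends up being the outermost vlan */
	static bool verify_path(struct dev *start, struct dev *end, struct tag *tags)
	{
		if (start == end)
			return true;
		for (int i = 0; i < 2 && start->upper[i]; i++) {
			if (verify_path(start->upper[i], end, tags)) {
				if (start->upper[i]->vid) {
					int idx = start->upper[i]->encap_level;
					if (idx >= MAX_VLAN_ENCAP)
						return false;
					tags[idx].proto = 0x8100;
					tags[idx].vid = start->upper[i]->vid;
				}
				return true;
			}
		}
		return false;
	}

	int main(void)
	{
		struct dev qinq = { "bond0.10.20", 20, 1, { 0 } };
		struct dev vlan = { "bond0.10", 10, 0, { &qinq } };
		struct dev bond = { "bond0", 0, 0, { &vlan } };
		struct tag tags[MAX_VLAN_ENCAP] = { { 0 } };

		if (verify_path(&bond, &qinq, tags))
			printf("outer vid %u, inner vid %u\n",
			       tags[0].vid, tags[1].vid);
		return 0;
	}

For a Q-in-Q stack of bond0 <- bond0.10 <- bond0.10.20, this prints "outer vid 10, inner vid 20", which is exactly the order bond_arp_send() needs: inner tags pushed first, the outer tag last.
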
+ 1 - 0
drivers/net/bonding/bond_options.c

@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
 	{ "off",     0,       BOND_VALFLAG_DEFAULT},
 	{ "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+	{ NULL,      -1,      0}
 };
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {

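The one-line fix above adds the missing array terminator: bond_opt_value tables are walked until an entry with a NULL string, so an unterminated table sends the walker past the end of the array into whatever lies beyond it. A minimal illustration of the convention:

	#include <limits.h>
	#include <stdio.h>
	#include <string.h>

	struct opt_value { const char *string; int value; int flags; };

	static const struct opt_value intmax_tbl[] = {
		{ "off",    0,       0 },
		{ "maxval", INT_MAX, 0 },
		{ NULL,     -1,      0 }	/* sentinel: lookups stop here */
	};

	/* walk until the NULL name; without the sentinel this loop would
	 * run off the end of the array into neighbouring data */
	static const struct opt_value *lookup(const char *s)
	{
		for (const struct opt_value *p = intmax_tbl; p->string; p++)
			if (!strcmp(p->string, s))
				return p;
		return NULL;
	}

	int main(void)
	{
		const struct opt_value *v = lookup("maxval");
		printf("%s -> %d\n", v ? v->string : "?", v ? v->value : -1);
		return 0;
	}
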
+ 1 - 0
drivers/net/bonding/bonding.h

@@ -36,6 +36,7 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP	2
 #define BOND_MAX_ARP_TARGETS	16
 
 #define BOND_DEFAULT_MIIMON	100

+ 0 - 7
drivers/net/can/c_can/Kconfig

@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
 	  SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
 	  boards like am335x, dm814x, dm813x and dm811x.
 
-config CAN_C_CAN_STRICT_FRAME_ORDERING
-	bool "Force a strict RX CAN frame order (may cause frame loss)"
-	---help---
-	  The RX split buffer prevents packet reordering but can cause packet
-	  loss. Only enable this option when you accept to lose CAN frames
-	  in favour of getting the received CAN frames in the correct order.
-
 config CAN_C_CAN_PCI
 	tristate "Generic PCI Bus based C_CAN/D_CAN driver"
 	depends on PCI

+ 0 - 36
drivers/net/can/c_can/c_can.c

@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
 static inline void c_can_rx_object_get(struct net_device *dev,
 				       struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-	if (obj < C_CAN_MSG_RX_LOW_LAST)
-		c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
-	else
-#endif
 		c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
 }
 
 static inline void c_can_rx_finalize(struct net_device *dev,
 				     struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-	if (obj < C_CAN_MSG_RX_LOW_LAST)
-		priv->rxmasked |= BIT(obj - 1);
-	else if (obj == C_CAN_MSG_RX_LOW_LAST) {
-		priv->rxmasked = 0;
-		/* activate all lower message objects */
-		c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
-	}
-#endif
 	if (priv->type != BOSCH_D_CAN)
 		c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
 }
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
 {
 	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
 
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-	pend &= ~priv->rxmasked;
-#endif
 	return pend;
 }
 
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
 *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- *   the INTPND bit.
- * - if the current message object number is equal to
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- *   receive message objects.
- * - if the current message object number is greater than
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- *   only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
 * We clear the newdat bit right away.
 *
 * This can result in packet reordering when the readout is slow.

+ 9 - 5
drivers/net/can/sja1000/peak_pci.c

@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct sja1000_priv *priv;
 	struct peak_pci_chan *chan;
-	struct net_device *dev;
+	struct net_device *dev, *prev_dev;
 	void __iomem *cfg_base, *reg_base;
 	u16 sub_sys_id, icr;
 	int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
 	writew(0x0, cfg_base + PITA_ICR + 2);
 
 	chan = NULL;
-	for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-		unregister_sja1000dev(dev);
-		free_sja1000dev(dev);
+	for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
 		priv = netdev_priv(dev);
 		chan = priv->priv;
+		prev_dev = chan->prev_dev;
+
+		unregister_sja1000dev(dev);
+		free_sja1000dev(dev);
 	}
 
 	/* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
 
 	/* Loop over all registered devices */
 	while (1) {
+		struct net_device *prev_dev = chan->prev_dev;
+
 		dev_info(&pdev->dev, "removing device %s\n", dev->name);
 		unregister_sja1000dev(dev);
 		free_sja1000dev(dev);
-		dev = chan->prev_dev;
+		dev = prev_dev;
 
 		if (!dev) {
 			/* do that only for first channel */

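Both peak_pci hunks cure the same use-after-free: the loops advanced via chan->prev_dev after free_sja1000dev() had already released the memory that chan lives in. The fix caches the link before freeing, as in this standalone sketch (the node type is invented; the driver's chain hangs off netdev private data):

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		struct node *prev;
		int id;
	};

	/* Safe teardown: cache the link before freeing the node it
	 * lives in.  Reading n->prev after free(n) is use-after-free. */
	static void free_chain(struct node *n)
	{
		while (n) {
			struct node *prev = n->prev;	/* save first */
			printf("freeing node %d\n", n->id);
			free(n);
			n = prev;			/* then advance */
		}
	}

	int main(void)
	{
		struct node *a = malloc(sizeof(*a));
		struct node *b = malloc(sizeof(*b));
		a->prev = NULL; a->id = 0;
		b->prev = a;    b->id = 1;
		free_chain(b);
		return 0;
	}
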
+ 12 - 0
drivers/net/ethernet/Kconfig

@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+	tristate "Beckhoff CX5020 EtherCAT master support"
+	depends on PCI
+	---help---
+	  Driver for EtherCAT master module located on CCAT FPGA
+	  that can be found on the Beckhoff CX5020, and possibly other
+	  Beckhoff CX series industrial PCs.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ec_bhf.
+
 source "drivers/net/ethernet/davicom/Kconfig"
 source "drivers/net/ethernet/davicom/Kconfig"
 
 
 config DNET
 config DNET

+ 1 - 0
drivers/net/ethernet/Makefile

@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
 obj-$(CONFIG_DM9000) += davicom/
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_NET_VENDOR_DEC) += dec/

+ 1 - 0
drivers/net/ethernet/altera/Makefile

@@ -5,3 +5,4 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
 altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__

+ 55 - 55
drivers/net/ethernet/altera/altera_msgdma.c

@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
 void msgdma_reset(struct altera_tse_private *priv)
 {
 	int counter;
-	struct msgdma_csr *txcsr =
-		(struct msgdma_csr *)priv->tx_dma_csr;
-	struct msgdma_csr *rxcsr =
-		(struct msgdma_csr *)priv->rx_dma_csr;
 
 	/* Reset Rx mSGDMA */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
-	iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+		msgdma_csroffs(status));
+	csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+		msgdma_csroffs(control));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(&rxcsr->status,
+		if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
 				     MSGDMA_CSR_STAT_RESETTING))
 			break;
 		udelay(1);
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
 			   "TSE Rx mSGDMA resetting bit never cleared!\n");
 
 	/* clear all status bits */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
 
 	/* Reset Tx mSGDMA */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
-	iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+		msgdma_csroffs(status));
+
+	csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+		msgdma_csroffs(control));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(&txcsr->status,
+		if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
 				     MSGDMA_CSR_STAT_RESETTING))
 			break;
 		udelay(1);
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
 			   "TSE Tx mSGDMA resetting bit never cleared!\n");
 
 	/* clear all status bits */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_disable_rxirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->rx_dma_csr;
-	tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+		      MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->rx_dma_csr;
-	tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+		    MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_disable_txirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->tx_dma_csr;
-	tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+		      MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_txirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->tx_dma_csr;
-	tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+		    MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->rx_dma_csr;
-	iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+	csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_clear_txirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->tx_dma_csr;
-	iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+	csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 /* return 0 to indicate transmit is pending */
 int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-	struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
-	iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
-	iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
-	iowrite32(0, &desc->write_addr_lo);
-	iowrite32(0, &desc->write_addr_hi);
-	iowrite32(buffer->len, &desc->len);
-	iowrite32(0, &desc->burst_seq_num);
-	iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
-	iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+	csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+		msgdma_descroffs(read_addr_lo));
+	csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+		msgdma_descroffs(read_addr_hi));
+	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+	csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+	csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+		msgdma_descroffs(stride));
+	csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+		msgdma_descroffs(control));
 	return 0;
 }
 
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 	u32 ready = 0;
 	u32 inuse;
 	u32 status;
-	struct msgdma_csr *txcsr =
-		(struct msgdma_csr *)priv->tx_dma_csr;
 
 	/* Get number of sent descriptors */
-	inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+	inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+			& 0xffff;
 
 	if (inuse) { /* Tx FIFO is not empty */
 		ready = priv->tx_prod - priv->tx_cons - inuse - 1;
 	} else {
 		/* Check for buffered last packet */
-		status = ioread32(&txcsr->status);
+		status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
 		if (status & MSGDMA_CSR_STAT_BUSY)
 			ready = priv->tx_prod - priv->tx_cons - 1;
 		else
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 void msgdma_add_rx_desc(struct altera_tse_private *priv,
 			struct tse_buffer *rxbuffer)
 {
-	struct msgdma_extended_desc *desc = priv->rx_dma_desc;
 	u32 len = priv->rx_dma_buf_sz;
 	dma_addr_t dma_addr = rxbuffer->dma_addr;
 	u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
 			| MSGDMA_DESC_CTL_TR_ERR_IRQ
 			| MSGDMA_DESC_CTL_GO);
 
-	iowrite32(0, &desc->read_addr_lo);
-	iowrite32(0, &desc->read_addr_hi);
-	iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
-	iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
-	iowrite32(len, &desc->len);
-	iowrite32(0, &desc->burst_seq_num);
-	iowrite32(0x00010001, &desc->stride);
-	iowrite32(control, &desc->control);
+	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+	csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+		msgdma_descroffs(write_addr_lo));
+	csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+		msgdma_descroffs(write_addr_hi));
+	csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+	csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+	csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
 }
 
 /* status is returned on upper 16 bits,
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
 	u32 rxstatus = 0;
 	u32 pktlength;
 	u32 pktstatus;
-	struct msgdma_csr *rxcsr =
-		(struct msgdma_csr *)priv->rx_dma_csr;
-	struct msgdma_response *rxresp =
-		(struct msgdma_response *)priv->rx_dma_resp;
-
-	if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
-		pktlength = ioread32(&rxresp->bytes_transferred);
-		pktstatus = ioread32(&rxresp->status);
+
+	if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+	    & 0xffff) {
+		pktlength = csrrd32(priv->rx_dma_resp,
+				    msgdma_respoffs(bytes_transferred));
+		pktstatus = csrrd32(priv->rx_dma_resp,
+				    msgdma_respoffs(status));
 		rxstatus = pktstatus;
 		rxstatus = rxstatus << 16;
 		rxstatus |= (pktlength & 0xffff);

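msgdma_rx_status() returns the frame status in the upper 16 bits and the byte count in the lower 16. The packing and the consumer-side unpacking are plain shift-and-mask, as this small sketch shows (the values are made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t pktstatus = 0x0004;	/* example error flags */
		uint32_t pktlength = 1514;	/* example frame length */

		/* pack: status in the upper half, length in the lower half */
		uint32_t rxstatus = (pktstatus << 16) | (pktlength & 0xffff);

		/* unpack on the consumer side */
		printf("status=0x%04x len=%u\n",
		       (unsigned)(rxstatus >> 16), (unsigned)(rxstatus & 0xffff));
		return 0;
	}
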
+ 4 - 9
drivers/net/ethernet/altera/altera_msgdmahw.h

@@ -17,15 +17,6 @@
 #ifndef __ALTERA_MSGDMAHW_H__
 #define __ALTERA_MSGDMAHW_H__
 
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
-	u32 read_addr;	/* data buffer source address */
-	u32 write_addr;	/* data buffer destination address */
-	u32 len;	/* the number of bytes to transfer per descriptor */
-	u32 control;	/* characteristics of the transfer */
-};
-
 /* mSGDMA extended descriptor format
 */
 struct msgdma_extended_desc {
@@ -159,6 +150,8 @@ struct msgdma_response {
 	u32 status;
 };
 
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
 /* mSGDMA response register bit definitions
 */
 #define MSGDMA_RESP_EARLY_TERM	BIT(8)

+ 90 - 91
drivers/net/ethernet/altera/altera_sgdma.c

@@ -20,8 +20,8 @@
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"
 
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-				struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+				struct sgdma_descrip __iomem *ndesc,
 				dma_addr_t ndesc_phys,
 				dma_addr_t raddr,
 				dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
 				int wfixed);
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-			      struct sgdma_descrip *desc);
+			      struct sgdma_descrip __iomem *desc);
 
 static int sgdma_async_read(struct altera_tse_private *priv);
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc);
+		 struct sgdma_descrip __iomem *desc);
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc);
+		 struct sgdma_descrip __iomem *desc);
 
 static int sgdma_txbusy(struct altera_tse_private *priv);
 
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
 	priv->rxdescphys = (dma_addr_t) 0;
 	priv->txdescphys = (dma_addr_t) 0;
 
-	priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+	priv->rxdescphys = dma_map_single(priv->device,
+					  (void __force *)priv->rx_dma_desc,
 					  priv->rxdescmem, DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
 		return -EINVAL;
 	}
 
-	priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+	priv->txdescphys = dma_map_single(priv->device,
+					  (void __force *)priv->tx_dma_desc,
 					  priv->txdescmem, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
 	}
 
 	/* Initialize descriptor memory to all 0's, sync memory to cache */
-	memset(priv->tx_dma_desc, 0, priv->txdescmem);
-	memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
 	dma_sync_single_for_device(priv->device, priv->txdescphys,
 				   priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
 */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-	u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
-	u32 txdescriplen   = priv->txdescmem;
-	u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
-	u32 rxdescriplen   = priv->rxdescmem;
-	struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
-	struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
 	/* Initialize descriptor memory to 0 */
-	memset(ptxdescripmem, 0, txdescriplen);
-	memset(prxdescripmem, 0, rxdescriplen);
+	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
-	iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-	iowrite32(0, &ptxsgdma->control);
+	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
 
-	iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-	iowrite32(0, &prxsgdma->control);
+	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 }
 
 /* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
 
 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+		    SGDMA_CTRLREG_CLRINT);
 }
 
 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+		    SGDMA_CTRLREG_CLRINT);
 }
 
 /* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
 */
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-	int pktstx = 0;
-	struct sgdma_descrip *descbase =
-		(struct sgdma_descrip *)priv->tx_dma_desc;
+	struct sgdma_descrip __iomem *descbase =
+		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;
 
-	struct sgdma_descrip *cdesc = &descbase[0];
-	struct sgdma_descrip *ndesc = &descbase[1];
+	struct sgdma_descrip __iomem *cdesc = &descbase[0];
+	struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
 	/* wait 'til the tx sgdma is ready for the next transmit request */
 	if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 			    0,				/* read fixed */
 			    SGDMA_CONTROL_WR_FIXED);	/* Generate SOP */
 
-	pktstx = sgdma_async_write(priv, cdesc);
+	sgdma_async_write(priv, cdesc);
 
 	/* enqueue the request to the pending transmit queue */
 	queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
 	u32 ready = 0;
-	struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
 
 	if (!sgdma_txbusy(priv) &&
-	    ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+	     & SGDMA_CONTROL_HW_OWNED) == 0) &&
 	    (dequeue_tx(priv))) {
 		ready = 1;
 	}
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
 */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
-	struct sgdma_descrip *desc = NULL;
-	int pktsrx;
-	unsigned int rxstatus = 0;
-	unsigned int pktlength = 0;
-	unsigned int pktstatus = 0;
+	struct sgdma_descrip __iomem *base =
+		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+	struct sgdma_descrip __iomem *desc = NULL;
 	struct tse_buffer *rxbuffer = NULL;
+	unsigned int rxstatus = 0;
 
-	u32 sts = ioread32(&csr->status);
+	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
 
 	desc = &base[0];
 	if (sts & SGDMA_STSREG_EOP) {
+		unsigned int pktlength = 0;
+		unsigned int pktstatus = 0;
 		dma_sync_single_for_cpu(priv->device,
 					priv->rxdescphys,
 					priv->sgdmadesclen,
 					DMA_FROM_DEVICE);
 
-		pktlength = desc->bytes_xferred;
-		pktstatus = desc->status & 0x3f;
-		rxstatus = pktstatus;
+		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+		pktstatus = csrrd8(desc, sgdma_descroffs(status));
+		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
 		rxstatus = rxstatus << 16;
 		rxstatus |= (pktlength & 0xffff);
 
 		if (rxstatus) {
-			desc->status = 0;
+			csrwr8(0, desc, sgdma_descroffs(status));
 
 			rxbuffer = dequeue_rx(priv);
 			if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 				    "sgdma rx and rx queue empty!\n");
 
 			/* Clear control */
-			iowrite32(0, &csr->control);
+			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 			/* clear status */
-			iowrite32(0xf, &csr->status);
+			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
 
 			/* kick the rx sgdma after reaping this descriptor */
-			pktsrx = sgdma_async_read(priv);
+			sgdma_async_read(priv);
 
 		} else {
 			/* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 			 */
 			netdev_err(priv->dev,
 				   "SGDMA RX Error Info: %x, %x, %x\n",
-				   sts, desc->status, rxstatus);
+				   sts, csrrd8(desc, sgdma_descroffs(status)),
+				   rxstatus);
 		}
 	} else if (sts == 0) {
-		pktsrx = sgdma_async_read(priv);
+		sgdma_async_read(priv);
 	}
 
 	return rxstatus;
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 
 
 /* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-				struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+				struct sgdma_descrip __iomem *ndesc,
 				dma_addr_t ndesc_phys,
 				dma_addr_t raddr,
 				dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
 				int wfixed)
 {
 	/* Clear the next descriptor as not owned by hardware */
-	u32 ctrl = ndesc->control;
+
+	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
 	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-	ndesc->control = ctrl;
+	csrwr8(ctrl, ndesc, sgdma_descroffs(control));
 
-	ctrl = 0;
 	ctrl = SGDMA_CONTROL_HW_OWNED;
 	ctrl |= generate_eop;
 	ctrl |= rfixed;
 	ctrl |= wfixed;
 
 	/* Channel is implicitly zero, initialized to 0 by default */
-
-	desc->raddr = raddr;
-	desc->waddr = waddr;
-	desc->next = lower_32_bits(ndesc_phys);
-	desc->control = ctrl;
-	desc->status = 0;
-	desc->rburst = 0;
-	desc->wburst = 0;
-	desc->bytes = length;
-	desc->bytes_xferred = 0;
+	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+	csrwr32(0, desc, sgdma_descroffs(pad1));
+	csrwr32(0, desc, sgdma_descroffs(pad2));
+	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+	csrwr8(ctrl, desc, sgdma_descroffs(control));
+	csrwr8(0, desc, sgdma_descroffs(status));
+	csrwr8(0, desc, sgdma_descroffs(wburst));
+	csrwr8(0, desc, sgdma_descroffs(rburst));
+	csrwr16(length, desc, sgdma_descroffs(bytes));
+	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
 }
 
 /* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
 */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	struct sgdma_descrip *descbase =
-		(struct sgdma_descrip *)priv->rx_dma_desc;
+	struct sgdma_descrip __iomem *descbase =
+		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 
-	struct sgdma_descrip *cdesc = &descbase[0];
-	struct sgdma_descrip *ndesc = &descbase[1];
+	struct sgdma_descrip __iomem *cdesc = &descbase[0];
+	struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
 	struct tse_buffer *rxbuffer = NULL;
 
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 					   priv->sgdmadesclen,
 					   DMA_TO_DEVICE);
 
-		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-			  &csr->next_descrip);
+		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+			priv->rx_dma_csr,
+			sgdma_csroffs(next_descrip));
 
-		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-			  &csr->control);
+		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+			priv->rx_dma_csr,
+			sgdma_csroffs(control));
 
 		return 1;
 	}
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-			     struct sgdma_descrip *desc)
+			     struct sgdma_descrip __iomem *desc)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
 	if (sgdma_txbusy(priv))
 		return 0;
 
 	/* clear control and status */
-	iowrite32(0, &csr->control);
-	iowrite32(0x1f, &csr->status);
+	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
 	dma_sync_single_for_device(priv->device, priv->txdescphys,
 				   priv->sgdmadesclen, DMA_TO_DEVICE);
 
-	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-		  &csr->next_descrip);
+	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+		priv->tx_dma_csr,
+		sgdma_csroffs(next_descrip));
 
-	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-		  &csr->control);
+	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+		priv->tx_dma_csr,
+		sgdma_csroffs(control));
 
 	return 1;
 }
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc)
+		 struct sgdma_descrip __iomem *desc)
 {
 	dma_addr_t paddr = priv->txdescmem_busaddr;
 	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc)
+		 struct sgdma_descrip __iomem *desc)
 {
 	dma_addr_t paddr = priv->rxdescmem_busaddr;
 	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
 */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+		       & SGDMA_STSREG_BUSY;
}
 
 /* waits for the tx sgdma to finish its current operation, returns 0
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
 	int delay = 0;
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
 
 	/* if DMA is busy, wait for the current transaction to finish */
-	while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+		& SGDMA_STSREG_BUSY) && (delay++ < 100))
 		udelay(1);
 
-	if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+	    & SGDMA_STSREG_BUSY) {
 		netdev_err(priv->dev, "timeout waiting for tx dma\n");
 		return 1;
 	}

+ 14 - 12
drivers/net/ethernet/altera/altera_sgdmahw.h

@@ -19,16 +19,16 @@
 
 /* SGDMA descriptor structure */
 struct sgdma_descrip {
-	unsigned int	raddr; /* address of data to be read */
-	unsigned int	pad1;
-	unsigned int	waddr;
-	unsigned int    pad2;
-	unsigned int	next;
-	unsigned int	pad3;
-	unsigned short  bytes;
-	unsigned char   rburst;
-	unsigned char	wburst;
-	unsigned short	bytes_xferred;	/* 16 bits, bytes xferred */
+	u32	raddr; /* address of data to be read */
+	u32	pad1;
+	u32	waddr;
+	u32	pad2;
+	u32	next;
+	u32	pad3;
+	u16	bytes;
+	u8	rburst;
+	u8	wburst;
+	u16	bytes_xferred;	/* 16 bits, bytes xferred */
 
 	/* bit 0: error
 	 * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
 	 * bit 6: reserved
 	 * bit 7: status eop for recv case
 	 */
-	unsigned char	status;
+	u8	status;
 
 	/* bit 0: eop
 	 * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
 	 * bits 3,4,5,6: Channel (always 0)
 	 * bit 7: hardware owned
 	 */
-	unsigned char	control;
+	u8	control;
 } __packed;
 
 
@@ -101,6 +101,8 @@ struct sgdma_csr {
 	u32	pad3[3];
 };
 
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
 
 #define SGDMA_STSREG_ERR	BIT(0) /* Error */
 #define SGDMA_STSREG_EOP	BIT(1) /* EOP */
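
sgdma_csroffs()/sgdma_descroffs() turn the packed register layouts into compile-time byte offsets, so addresses are derived from the struct without ever dereferencing it through a plain pointer. A minimal stand-alone sketch of the same idea (not driver code; names are illustrative):

/* Stand-alone illustration: offsetof() maps a struct layout to the
 * byte offsets an MMIO accessor needs. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_csr {
	uint32_t control;	/* byte offset 0x0 */
	uint32_t status;	/* byte offset 0x4 */
};

#define demo_csroffs(m) (offsetof(struct demo_csr, m))

int main(void)
{
	printf("status lives at byte offset %zu\n", demo_csroffs(status));
	return 0;
}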

+ 47 - 0
drivers/net/ethernet/altera/altera_tse.h

@@ -357,6 +357,8 @@ struct altera_tse_mac {
 	u32 reserved5[42];
 };
 
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
 /* Transmit and Receive Command Registers Bit Definitions
  */
 #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC		BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
  */
 void altera_tse_set_ethtool_ops(struct net_device *);
 
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writeb(val, paddr);
+}
+
 #endif /* __ALTERA_TSE_H__ */
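
With the accessors above, a register update becomes a read-modify-write against the MAC base plus tse_csroffs(); a hedged usage sketch mirroring how the main driver file uses them (the function is illustrative, the bits are the MAC_CMDCFG_* definitions from this header):

/* Sketch: disable TX/RX on a TSE MAC via the new helpers. */
static void demo_mac_quiesce(struct altera_tse_private *priv)
{
	u32 cfg = csrrd32(priv->mac_dev, tse_csroffs(command_config));

	cfg &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
	csrwr32(cfg, priv->mac_dev, tse_csroffs(command_config));
}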

+ 71 - 37
drivers/net/ethernet/altera/altera_tse_ethtool.c

@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
 			   u64 *buf)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	u64 ext;
 
-	buf[0] = ioread32(&mac->frames_transmitted_ok);
-	buf[1] = ioread32(&mac->frames_received_ok);
-	buf[2] = ioread32(&mac->frames_check_sequence_errors);
-	buf[3] = ioread32(&mac->alignment_errors);
+	buf[0] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_transmitted_ok));
+	buf[1] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_received_ok));
+	buf[2] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_check_sequence_errors));
+	buf[3] = csrrd32(priv->mac_dev,
+			 tse_csroffs(alignment_errors));
 
 	/* Extended aOctetsTransmittedOK counter */
-	ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-	ext |= ioread32(&mac->octets_transmitted_ok);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(octets_transmitted_ok));
 	buf[4] = ext;
 
 	/* Extended aOctetsReceivedOK counter */
-	ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-	ext |= ioread32(&mac->octets_received_ok);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_octets_received_ok)) << 32;
+
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(octets_received_ok));
 	buf[5] = ext;
 
-	buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-	buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-	buf[8] = ioread32(&mac->if_in_errors);
-	buf[9] = ioread32(&mac->if_out_errors);
-	buf[10] = ioread32(&mac->if_in_ucast_pkts);
-	buf[11] = ioread32(&mac->if_in_multicast_pkts);
-	buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-	buf[13] = ioread32(&mac->if_out_discards);
-	buf[14] = ioread32(&mac->if_out_ucast_pkts);
-	buf[15] = ioread32(&mac->if_out_multicast_pkts);
-	buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-	buf[17] = ioread32(&mac->ether_stats_drop_events);
+	buf[6] = csrrd32(priv->mac_dev,
+			 tse_csroffs(tx_pause_mac_ctrl_frames));
+	buf[7] = csrrd32(priv->mac_dev,
+			 tse_csroffs(rx_pause_mac_ctrl_frames));
+	buf[8] = csrrd32(priv->mac_dev,
+			 tse_csroffs(if_in_errors));
+	buf[9] = csrrd32(priv->mac_dev,
+			 tse_csroffs(if_out_errors));
+	buf[10] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_ucast_pkts));
+	buf[11] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_multicast_pkts));
+	buf[12] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_broadcast_pkts));
+	buf[13] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_discards));
+	buf[14] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_ucast_pkts));
+	buf[15] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_multicast_pkts));
+	buf[16] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_broadcast_pkts));
+	buf[17] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_drop_events));
 
 	/* Extended etherStatsOctets counter */
-	ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-	ext |= ioread32(&mac->ether_stats_octets);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_ether_stats_octets)) << 32;
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(ether_stats_octets));
 	buf[18] = ext;
 
-	buf[19] = ioread32(&mac->ether_stats_pkts);
-	buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-	buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-	buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-	buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-	buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-	buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-	buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-	buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-	buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-	buf[29] = ioread32(&mac->ether_stats_jabbers);
-	buf[30] = ioread32(&mac->ether_stats_fragments);
+	buf[19] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts));
+	buf[20] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_undersize_pkts));
+	buf[21] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_oversize_pkts));
+	buf[22] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_64_octets));
+	buf[23] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_65to127_octets));
+	buf[24] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_128to255_octets));
+	buf[25] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_256to511_octets));
+	buf[26] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_512to1023_octets));
+	buf[27] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_1024to1518_octets));
+	buf[28] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_1519tox_octets));
+	buf[29] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_jabbers));
+	buf[30] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_fragments));
 }
 
 static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 {
 	int i;
 	struct altera_tse_private *priv = netdev_priv(dev);
-	u32 *tse_mac_regs = (u32 *)priv->mac_dev;
 	u32 *buf = regbuf;
 
 	/* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	regs->version = 1;
 
 	for (i = 0; i < TSE_NUM_REGS; i++)
-		buf[i] = ioread32(&tse_mac_regs[i]);
+		buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
 static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
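
The extended statistics above are assembled from two 32-bit halves; the recurring idiom, factored into a hedged helper that is not part of the patch:

/* Sketch: compose a 64-bit counter from MSB/LSB CSR halves, reading
 * the MSB first exactly as tse_fill_stats() does above. The two reads
 * are not atomic with respect to a rolling counter. */
static u64 demo_read_stat64(struct altera_tse_private *priv,
			    size_t msb_offs, size_t lsb_offs)
{
	u64 val = (u64)csrrd32(priv->mac_dev, msb_offs) << 32;

	val |= csrrd32(priv->mac_dev, lsb_offs);
	return val;
}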

+ 76 - 57
drivers/net/ethernet/altera/altera_tse_main.c

@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  */
 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
-	u32 data;
+	struct net_device *ndev = bus->priv;
+	struct altera_tse_private *priv = netdev_priv(ndev);
 
 	/* set MDIO address */
-	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+	csrwr32((mii_id & 0x1f), priv->mac_dev,
+		tse_csroffs(mdio_phy0_addr));
 
 	/* get the data */
-	data = ioread32(&mdio_regs[regnum]) & 0xffff;
-	return data;
+	return csrrd32(priv->mac_dev,
+		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 				 u16 value)
 {
-	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+	struct net_device *ndev = bus->priv;
+	struct altera_tse_private *priv = netdev_priv(ndev);
 
 	/* set MDIO address */
-	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+	csrwr32((mii_id & 0x1f), priv->mac_dev,
+		tse_csroffs(mdio_phy0_addr));
 
 	/* write the data */
-	iowrite32((u32) value, &mdio_regs[regnum]);
+	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
 	return 0;
 }
 
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		mdio->irq[i] = PHY_POLL;
 
-	mdio->priv = priv->mac_dev;
+	mdio->priv = dev;
 	mdio->parent = priv->device;
 
 	ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int nopaged_len = skb_headlen(skb);
 	enum netdev_tx ret = NETDEV_TX_OK;
 	dma_addr_t dma_addr;
-	int txcomplete = 0;
 
 	spin_lock_bh(&priv->tx_lock);
 
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_sync_single_for_device(priv->device, buffer->dma_addr,
 				   buffer->len, DMA_TO_DEVICE);
 
-	txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+	priv->dmaops->tx_buffer(priv, buffer);
 
 	skb_tx_timestamp(skb);
 
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
 	struct altera_tse_private *priv = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-	int ret;
 
 	if (priv->phy_addr != POLL_PHY) {
 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
 			netdev_err(dev, "Could not attach to PHY\n");
 
 	} else {
+		int ret;
 		phydev = phy_find_first(priv->mdio);
 		if (phydev == NULL) {
 			netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
 
 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
 	u32 msb;
 	u32 lsb;
 
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 
 	/* Set primary MAC address */
-	iowrite32(msb, &mac->mac_addr_0);
-	iowrite32(lsb, &mac->mac_addr_1);
+	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 }
 
 /* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
  */
 static int reset_mac(struct altera_tse_private *priv)
 {
-	void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
 	int counter;
 	u32 dat;
 
-	dat = ioread32(cmd_cfg_reg);
+	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
-	iowrite32(dat, cmd_cfg_reg);
+	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+				     MAC_CMDCFG_SW_RESET))
 			break;
 		udelay(1);
 	}
 
 	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		dat = ioread32(cmd_cfg_reg);
+		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 		dat &= ~MAC_CMDCFG_SW_RESET;
-		iowrite32(dat, cmd_cfg_reg);
+		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 		return -1;
 	}
 	return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
 */
 static int init_mac(struct altera_tse_private *priv)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
 	unsigned int cmd = 0;
 	u32 frm_length;
 
 	/* Setup Rx FIFO */
-	iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
-		  &mac->rx_section_empty);
-	iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
-	iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
-	iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+		priv->mac_dev, tse_csroffs(rx_section_empty));
+
+	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+		tse_csroffs(rx_section_full));
+
+	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+		tse_csroffs(rx_almost_empty));
+
+	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+		tse_csroffs(rx_almost_full));
 
 	/* Setup Tx FIFO */
-	iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
-		  &mac->tx_section_empty);
-	iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
-	iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
-	iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+		priv->mac_dev, tse_csroffs(tx_section_empty));
+
+	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+		tse_csroffs(tx_section_full));
+
+	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+		tse_csroffs(tx_almost_empty));
+
+	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+		tse_csroffs(tx_almost_full));
 
 	/* MAC Address Configuration */
 	tse_update_mac_addr(priv, priv->dev->dev_addr);
 
 	/* MAC Function Configuration */
 	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
-	iowrite32(frm_length, &mac->frm_length);
-	iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+		tse_csroffs(tx_ipg_length));
 
 	/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
 	 * start address
 	 */
-	tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
-	tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
-					 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
 	/* Set the MAC options */
-	cmd = ioread32(&mac->command_config);
+	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
 	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
 	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
 	cmd &= ~MAC_CMDCFG_ETH_SPEED;
 	cmd &= ~MAC_CMDCFG_ENA_10;
 
-	iowrite32(cmd, &mac->command_config);
+	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 
-	iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+		tse_csroffs(pause_quanta));
 
 	if (netif_msg_hw(priv))
 		dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
  */
 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
-	u32 value = ioread32(&mac->command_config);
+	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 
 	if (enable)
 		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
 	else
 		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 
-	iowrite32(value, &mac->command_config);
+	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 }
 
 /* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
 static void altera_tse_set_mcfilter(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	int i;
 	struct netdev_hw_addr *ha;
 
 	/* clear the hash filter */
 	for (i = 0; i < 64; i++)
-		iowrite32(0, &(mac->hash_table[i]));
+		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 
 	netdev_for_each_mc_addr(ha, dev) {
 		unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 
 			hash = (hash << 1) | xor_bit;
 		}
-		iowrite32(1, &(mac->hash_table[hash]));
+		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
 	}
 }
 
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 static void altera_tse_set_mcfilterall(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	int i;
 
 	/* set the hash filter */
 	for (i = 0; i < 64; i++)
-		iowrite32(1, &(mac->hash_table[i]));
+		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 }
 
 /* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 
 	spin_lock(&priv->mac_cfg_lock);
 
 	if (dev->flags & IFF_PROMISC)
-		tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+			    MAC_CMDCFG_PROMIS_EN);
 
 	if (dev->flags & IFF_ALLMULTI)
 		altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 static void tse_set_rx_mode(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 
 	spin_lock(&priv->mac_cfg_lock);
 
 	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
 	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
-		tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+			    MAC_CMDCFG_PROMIS_EN);
 	else
-		tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+			      MAC_CMDCFG_PROMIS_EN);
 
 	spin_unlock(&priv->mac_cfg_lock);
 }
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
 		of_property_read_bool(pdev->dev.of_node,
 				      "altr,has-hash-multicast-filter");
 
+	/* Set hash filter to not set for now until the
+	 * multicast filter receive issue is debugged
+	 */
+	priv->hash_filter = 0;
+
 	/* get supplemental address settings for this instance */
 	priv->added_unicast =
 		of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
 	return 0;
 }
 
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
 	.altera_dtype = ALTERA_DTYPE_SGDMA,
 	.dmamask = 32,
 	.reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
 	.start_rxdma = sgdma_start_rxdma,
 };
 
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
 	.altera_dtype = ALTERA_DTYPE_MSGDMA,
 	.dmamask = 64,
 	.reset_dma = msgdma_reset,
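
altera_tse_set_mcfilter() above folds each multicast address into a 6-bit bucket index, one hash_table register per bucket; the per-bit XOR that the hunk elides is sketched here under the usual interpretation (illustrative, not the patch's literal loop):

/* Hypothetical sketch: derive a 6-bit hash by XOR-ing bit b of all
 * six address octets, then mark the selected bucket register. */
static void demo_mark_mcast(struct altera_tse_private *priv, const u8 *addr)
{
	unsigned int hash = 0;
	int bit, octet;

	for (bit = 0; bit < 6; bit++) {
		unsigned int xor_bit = 0;

		for (octet = 0; octet < 6; octet++)
			xor_bit ^= (addr[octet] >> bit) & 1;

		hash = (hash << 1) | xor_bit;
	}

	csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
}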

+ 10 - 10
drivers/net/ethernet/altera/altera_utils.c

@@ -17,28 +17,28 @@
 #include "altera_tse.h"
 #include "altera_utils.h"
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	value |= bit_mask;
-	iowrite32(value, ioaddr);
+	csrwr32(value, ioaddr, offs);
 }
 
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	value &= ~bit_mask;
-	iowrite32(value, ioaddr);
+	csrwr32(value, ioaddr, offs);
 }
 
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	return (value & bit_mask) ? 1 : 0;
 }
 
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	return (value & bit_mask) ? 0 : 1;
 }

+ 4 - 4
drivers/net/ethernet/altera/altera_utils.h

@@ -19,9 +19,9 @@
 #ifndef __ALTERA_UTILS_H__
 #define __ALTERA_UTILS_H__
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
 
 #endif /* __ALTERA_UTILS_H__*/
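
Because the helpers now take an offset, callers such as reset_mac() can poll any CSR bit without a typed register pointer; a hedged sketch of that bounded-wait pattern (function name and limit are illustrative):

/* Sketch: wait (bounded) for a bit to clear using the offset-based
 * helpers; register and bit are whatever the caller passes in. */
static int demo_wait_bit_clear(void __iomem *base, size_t offs, u32 bit)
{
	int delay = 0;

	while (tse_bit_is_set(base, offs, bit) && delay++ < 100)
		udelay(1);

	return tse_bit_is_set(base, offs, bit) ? -ETIMEDOUT : 0;
}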

+ 7 - 3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c

@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR	(0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR	(0x08)
 #define BCM_5710_UNDI_FW_MF_VERS	(0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p)	(0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f)	(0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 {
 	u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 	/* Reset should be performed after BRB is emptied */
 	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
 		u32 timer_count = 1000;
+		bool need_write = true;
 
 		/* Close the MAC Rx to prevent BRB from filling up */
 		bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 			 * cleaning methods - might be redundant but harmless.
 			 */
 			if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-				bnx2x_prev_unload_undi_mf(bp);
+				if (need_write) {
+					bnx2x_prev_unload_undi_mf(bp);
+					need_write = false;
+				}
 			} else if (prev_undi) {
 				/* If UNDI resides in memory,
 				 * manually increment it

+ 1 - 1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c

@@ -2695,7 +2695,7 @@ out:
 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 	}
 
-	return 0;
+	return rc;
 }
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)

+ 1 - 1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c

@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 out:
 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-	return 0;
+	return rc;
 }
 
 /* request pf to config rss table for vf queues*/

+ 706 - 0
drivers/net/ethernet/ec_bhf.c

@@ -0,0 +1,706 @@
+ /*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for EtherCAT master module present on CCAT FPGA.
+ * Those can be found on Beckhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC	20000
+
+#define INFO_BLOCK_SIZE		0x10
+#define INFO_BLOCK_TYPE		0x0
+#define INFO_BLOCK_REV		0x2
+#define INFO_BLOCK_BLK_CNT	0x4
+#define INFO_BLOCK_TX_CHAN	0x4
+#define INFO_BLOCK_RX_CHAN	0x5
+#define INFO_BLOCK_OFFSET	0x8
+
+#define EC_MII_OFFSET		0x4
+#define EC_FIFO_OFFSET		0x8
+#define EC_MAC_OFFSET		0xc
+
+#define MAC_FRAME_ERR_CNT	0x0
+#define MAC_RX_ERR_CNT		0x1
+#define MAC_CRC_ERR_CNT		0x2
+#define MAC_LNK_LST_ERR_CNT	0x3
+#define MAC_TX_FRAME_CNT	0x10
+#define MAC_RX_FRAME_CNT	0x14
+#define MAC_TX_FIFO_LVL		0x20
+#define MAC_DROPPED_FRMS	0x28
+#define MAC_CONNECTED_CCAT_FLAG	0x78
+
+#define MII_MAC_ADDR		0x8
+#define MII_MAC_FILT_FLAG	0xe
+#define MII_LINK_STATUS		0xf
+
+#define FIFO_TX_REG		0x0
+#define FIFO_TX_RESET		0x8
+#define FIFO_RX_REG		0x10
+#define FIFO_RX_ADDR_VALID	(1u << 31)
+#define FIFO_RX_RESET		0x18
+
+#define DMA_CHAN_OFFSET		0x1000
+#define DMA_CHAN_SIZE		0x8
+
+#define DMA_WINDOW_SIZE_MASK	0xfffffffc
+
+static struct pci_device_id ids[] = {
+	{ PCI_DEVICE(0x15ec, 0x5000), },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK	0xffffffu
+#define RXHDR_NEXT_VALID	(1u << 31)
+	__le32 next;
+#define RXHDR_NEXT_RECV_FLAG	0x1
+	__le32 recv;
+#define RXHDR_LEN_MASK		0xfffu
+	__le16 len;
+	__le16 port;
+	__le32 reserved;
+	u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE	0x7e8
+struct rx_desc {
+	struct rx_header header;
+	u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+	__le16 len;
+#define TX_HDR_PORT_0		0x1
+#define TX_HDR_PORT_1		0x2
+	u8 port;
+	u8 ts_enable;
+#define TX_HDR_SENT		0x1
+	__le32 sent;
+	u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+	struct tx_header header;
+	u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE		64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+	u8 *buf;
+	size_t len;
+	dma_addr_t buf_phys;
+
+	u8 *alloc;
+	size_t alloc_len;
+	dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+	struct net_device *net_dev;
+
+	struct pci_dev *dev;
+
+	void * __iomem io;
+	void * __iomem dma_io;
+
+	struct hrtimer hrtimer;
+
+	int tx_dma_chan;
+	int rx_dma_chan;
+	void * __iomem ec_io;
+	void * __iomem fifo_io;
+	void * __iomem mii_io;
+	void * __iomem mac_io;
+
+	struct bhf_dma rx_buf;
+	struct rx_desc *rx_descs;
+	int rx_dnext;
+	int rx_dcount;
+
+	struct bhf_dma tx_buf;
+	struct tx_desc *tx_descs;
+	int tx_dcount;
+	int tx_dnext;
+
+	u64 stat_rx_bytes;
+	u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID	0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+	struct device *dev = PRIV_TO_DEV(priv);
+
+	dev_dbg(dev, "Frame error counter: %d\n",
+		ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+	dev_dbg(dev, "RX error counter: %d\n",
+		ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+	dev_dbg(dev, "CRC error counter: %d\n",
+		ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+	dev_dbg(dev, "TX frame counter: %d\n",
+		ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+	dev_dbg(dev, "RX frame counter: %d\n",
+		ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+	dev_dbg(dev, "TX fifo level: %d\n",
+		ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+	dev_dbg(dev, "Dropped frames: %d\n",
+		ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+	dev_dbg(dev, "Connected with CCAT slot: %d\n",
+		ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+	dev_dbg(dev, "Link status: %d\n",
+		ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+	iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+	iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+	iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+	iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+	iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+	iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+	iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+	iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+	iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+	iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+	u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+	u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+	iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+	dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+	return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+	if (unlikely(netif_queue_stopped(priv->net_dev))) {
+		/* Make sure that we perceive changes to tx_dnext. */
+		smp_rmb();
+
+		if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+			netif_wake_queue(priv->net_dev);
+	}
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+	return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+	iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+		  priv->fifo_io + FIFO_RX_REG);
+}
+
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+	struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+	struct device *dev = PRIV_TO_DEV(priv);
+
+	while (ec_bhf_pkt_received(desc)) {
+		int pkt_size = (le16_to_cpu(desc->header.len) &
+			       RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+		u8 *data = desc->data;
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+		dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+		if (skb) {
+			memcpy(skb_put(skb, pkt_size), data, pkt_size);
+			skb->protocol = eth_type_trans(skb, priv->net_dev);
+			dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+			priv->stat_rx_bytes += pkt_size;
+
+			netif_rx(skb);
+		} else {
+			dev_err_ratelimited(dev,
+				"Couldn't allocate an sk_buff for a packet of size %u\n",
+				pkt_size);
+		}
+
+		desc->header.recv = 0;
+
+		ec_bhf_add_rx_desc(priv, desc);
+
+		priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+		desc = &priv->rx_descs[priv->rx_dnext];
+	}
+
+}
+
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+	struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+						hrtimer);
+	ec_bhf_process_rx(priv);
+	ec_bhf_process_tx(priv);
+
+	if (!netif_running(priv->net_dev))
+		return HRTIMER_NORESTART;
+
+	hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+	return HRTIMER_RESTART;
+}
+
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+	struct device *dev = PRIV_TO_DEV(priv);
+	unsigned block_count, i;
+	void * __iomem ec_info;
+
+	dev_dbg(dev, "Info block:\n");
+	dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+	dev_dbg(dev, "Revision of function: %x\n",
+		(unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+	block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+	dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+	for (i = 0; i < block_count; i++) {
+		u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+				    INFO_BLOCK_TYPE);
+		if (type == ETHERCAT_MASTER_ID)
+			break;
+	}
+	if (i == block_count) {
+		dev_err(dev, "EtherCAT master with DMA block not found\n");
+		return -ENODEV;
+	}
+	dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+	ec_info = priv->io + i * INFO_BLOCK_SIZE;
+	dev_dbg(dev, "EtherCAT master revision: %d\n",
+		ioread16(ec_info + INFO_BLOCK_REV));
+
+	priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+	dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+		priv->tx_dma_chan);
+
+	priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+	dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+		 priv->rx_dma_chan);
+
+	priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+	priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+	priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+	priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+	dev_dbg(dev,
+		"EtherCAT block address: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+		priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+	return 0;
+}
+
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+				     struct net_device *net_dev)
+{
+	struct ec_bhf_priv *priv = netdev_priv(net_dev);
+	struct tx_desc *desc;
+	unsigned len;
+
+	dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+	desc = &priv->tx_descs[priv->tx_dnext];
+
+	skb_copy_and_csum_dev(skb, desc->data);
+	len = skb->len;
+
+	memset(&desc->header, 0, sizeof(desc->header));
+	desc->header.len = cpu_to_le16(len);
+	desc->header.port = TX_HDR_PORT_0;
+
+	ec_bhf_send_packet(priv, desc);
+
+	priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+	if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+		/* Make sure that updates to tx_dnext are perceived
+		 * by the timer routine.
+		 */
+		smp_wmb();
+
+		netif_stop_queue(net_dev);
+
+		dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+		ec_bhf_print_status(priv);
+	}
+
+	priv->stat_tx_bytes += len;
+
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+				struct bhf_dma *buf,
+				int channel,
+				int size)
+{
+	int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+	struct device *dev = PRIV_TO_DEV(priv);
+	u32 mask;
+
+	iowrite32(0xffffffff, priv->dma_io + offset);
+
+	mask = ioread32(priv->dma_io + offset);
+	mask &= DMA_WINDOW_SIZE_MASK;
+	dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+	/* We want to allocate a chunk of memory that is:
+	 * - aligned to the mask we just read
+	 * - is of size 2^mask bytes (at most)
+	 * In order to ensure that we will allocate buffer of
+	 * 2 * 2^mask bytes.
+	 */
+	buf->len = min_t(int, ~mask + 1, size);
+	buf->alloc_len = 2 * buf->len;
+
+	dev_dbg(dev, "Allocating %d bytes for channel %d",
+		(int)buf->alloc_len, channel);
+	buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+					GFP_KERNEL);
+	if (buf->alloc == NULL) {
+		dev_info(dev, "Failed to allocate buffer\n");
+		return -ENOMEM;
+	}
+
+	buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+	buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+	iowrite32(0, priv->dma_io + offset + 4);
+	iowrite32(buf->buf_phys, priv->dma_io + offset);
+	dev_dbg(dev, "Buffer: %x and read from dev: %x",
+		(unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+	return 0;
+}
+
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+	int i = 0;
+
+	priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+	priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+	priv->tx_dnext = 0;
+
+	for (i = 0; i < priv->tx_dcount; i++)
+		priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+	int i;
+
+	priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+	priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+	priv->rx_dnext = 0;
+
+	for (i = 0; i < priv->rx_dcount; i++) {
+		struct rx_desc *desc = &priv->rx_descs[i];
+		u32 next;
+
+		if (i != priv->rx_dcount - 1)
+			next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+		else
+			next = 0;
+		next |= RXHDR_NEXT_VALID;
+		desc->header.next = cpu_to_le32(next);
+		desc->header.recv = 0;
+		ec_bhf_add_rx_desc(priv, desc);
+	}
+}
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+	struct ec_bhf_priv *priv = netdev_priv(net_dev);
+	struct device *dev = PRIV_TO_DEV(priv);
+	int err = 0;
+
+	dev_info(dev, "Opening device\n");
+
+	ec_bhf_reset(priv);
+
+	err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+				   FIFO_SIZE * sizeof(struct rx_desc));
+	if (err) {
+		dev_err(dev, "Failed to allocate rx buffer\n");
+		goto out;
+	}
+	ec_bhf_setup_rx_descs(priv);
+
+	dev_info(dev, "RX buffer allocated, address: %x\n",
+		 (unsigned)priv->rx_buf.buf_phys);
+
+	err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+				   FIFO_SIZE * sizeof(struct tx_desc));
+	if (err) {
+		dev_err(dev, "Failed to allocate tx buffer\n");
+		goto error_rx_free;
+	}
+	dev_dbg(dev, "TX buffer allocated, address: %x\n",
+		(unsigned)priv->tx_buf.buf_phys);
+
+	iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+	ec_bhf_setup_tx_descs(priv);
+
+	netif_start_queue(net_dev);
+
+	hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	priv->hrtimer.function = ec_bhf_timer_fun;
+	hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+		      HRTIMER_MODE_REL);
+
+	dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+	ec_bhf_print_status(priv);
+
+	return 0;
+
+error_rx_free:
+	dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+			  priv->rx_buf.alloc_len);
+out:
+	return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+	struct ec_bhf_priv *priv = netdev_priv(net_dev);
+	struct device *dev = PRIV_TO_DEV(priv);
+
+	hrtimer_cancel(&priv->hrtimer);
+
+	ec_bhf_reset(priv);
+
+	netif_tx_disable(net_dev);
+
+	dma_free_coherent(dev, priv->tx_buf.alloc_len,
+			  priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+	dma_free_coherent(dev, priv->rx_buf.alloc_len,
+			  priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+	return 0;
+}
+
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+		 struct rtnl_link_stats64 *stats)
+{
+	struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+	stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+				ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+				ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+	stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+	stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+	stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+	stats->tx_bytes = priv->stat_tx_bytes;
+	stats->rx_bytes = priv->stat_rx_bytes;
+
+	return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+	.ndo_start_xmit		= ec_bhf_start_xmit,
+	.ndo_open		= ec_bhf_open,
+	.ndo_stop		= ec_bhf_stop,
+	.ndo_get_stats64	= ec_bhf_get_stats,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr
+};
+
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct net_device *net_dev;
+	struct ec_bhf_priv *priv;
+	void * __iomem dma_io;
+	void * __iomem io;
+	int err = 0;
+
+	err = pci_enable_device(dev);
+	if (err)
+		return err;
+
+	pci_set_master(dev);
+
+	err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+	if (err) {
+		dev_err(&dev->dev,
+			"Required dma mask not supported, failed to initialize device\n");
+		err = -EIO;
+		goto err_disable_dev;
+	}
+
+	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+	if (err) {
+		dev_err(&dev->dev,
+			"Required dma mask not supported, failed to initialize device\n");
+		goto err_disable_dev;
+	}
+
+	err = pci_request_regions(dev, "ec_bhf");
+	if (err) {
+		dev_err(&dev->dev, "Failed to request pci memory regions\n");
+		goto err_disable_dev;
+	}
+
+	io = pci_iomap(dev, 0, 0);
+	if (!io) {
+		dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+		err = -EIO;
+		goto err_release_regions;
+	}
+
+	dma_io = pci_iomap(dev, 2, 0);
+	if (!dma_io) {
+		dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+		err = -EIO;
+		goto err_unmap;
+	}
+
+	net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+	if (net_dev == 0) {
+		err = -ENOMEM;
+		goto err_unmap_dma_io;
+	}
+
+	pci_set_drvdata(dev, net_dev);
+	SET_NETDEV_DEV(net_dev, &dev->dev);
+
+	net_dev->features = 0;
+	net_dev->flags |= IFF_NOARP;
+
+	net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+	priv = netdev_priv(net_dev);
+	priv->net_dev = net_dev;
+	priv->io = io;
+	priv->dma_io = dma_io;
+	priv->dev = dev;
+
+	err = ec_bhf_setup_offsets(priv);
+	if (err < 0)
+		goto err_free_net_dev;
+
+	memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+	dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+		net_dev->dev_addr);
+
+	err = register_netdev(net_dev);
+	if (err < 0)
+		goto err_free_net_dev;
+
+	return 0;
+
+err_free_net_dev:
+	free_netdev(net_dev);
+err_unmap_dma_io:
+	pci_iounmap(dev, dma_io);
+err_unmap:
+	pci_iounmap(dev, io);
+err_release_regions:
+	pci_release_regions(dev);
+err_disable_dev:
+	pci_clear_master(dev);
+	pci_disable_device(dev);
+
+	return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+	struct net_device *net_dev = pci_get_drvdata(dev);
+	struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+	unregister_netdev(net_dev);
+	free_netdev(net_dev);
+
+	pci_iounmap(dev, priv->dma_io);
+	pci_iounmap(dev, priv->io);
+	pci_release_regions(dev);
+	pci_clear_master(dev);
+	pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+	.name		= "ec_bhf",
+	.id_table	= ids,
+	.probe		= ec_bhf_probe,
+	.remove		= ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+	return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+	pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
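
ec_bhf_alloc_dma_mem() above discovers each channel's DMA window by writing all-ones and reading back a mask; ~mask + 1 is the window size, and allocating twice that guarantees an aligned sub-buffer exists inside the allocation. A hedged, stand-alone illustration of the arithmetic (the mask value is hypothetical):

/* Illustration only: with mask = 0xffffc000 the window is 16 KiB, and
 * rounding (alloc_phys + window) down to the mask yields an aligned
 * buffer that always fits inside a 2 * window allocation. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0xffffc000;
	uint32_t window = ~mask + 1;		/* 0x4000 */
	uint32_t alloc_phys = 0x12346789;	/* arbitrary, unaligned */
	uint32_t buf_phys = (alloc_phys + window) & mask;

	printf("window %#x, aligned buffer at %#x\n", window, buf_phys);
	return 0;
}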

+ 6 - 0
drivers/net/ethernet/emulex/benet/be_main.c

@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
 	if (status)
 		goto err;
 
+	/* On some BE3 FW versions, after a HW reset,
+	 * interrupts will remain disabled for each function.
+	 * So, explicitly enable interrupts
+	 */
+	be_intr_set(adapter, true);
+
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)

+ 47 - 6
drivers/net/ethernet/jme.c

@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
 	return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
 		struct txdesc *txdesc,
 		struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
 				len,
 				PCI_DMA_TODEVICE);
 
+	if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+		return -EINVAL;
+
 	pci_dma_sync_single_for_device(pdev,
 				       dmaaddr,
 				       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
 	txbi->mapping = dmaaddr;
 	txbi->len = len;
+	return 0;
 }
 
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+	int mask = jme->tx_ring_mask;
+	int j;
+
+	for (j = 0 ; j < count ; j++) {
+		ctxbi = txbi + ((startidx + j + 2) & (mask));
+		pci_unmap_page(jme->pdev,
+				ctxbi->mapping,
+				ctxbi->len,
+				PCI_DMA_TODEVICE);
+
+		ctxbi->mapping = 0;
+		ctxbi->len = 0;
+	}
+
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
 	struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	int mask = jme->tx_ring_mask;
 	const struct skb_frag_struct *frag;
 	u32 len;
+	int ret = 0;
 
 	for (i = 0 ; i < nr_frags ; ++i) {
 		frag = &skb_shinfo(skb)->frags[i];
 		ctxdesc = txdesc + ((idx + i + 2) & (mask));
 		ctxbi = txbi + ((idx + i + 2) & (mask));
 
-		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+		ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
 				skb_frag_page(frag),
 				frag->page_offset, skb_frag_size(frag), hidma);
+		if (ret) {
+			jme_drop_tx_map(jme, idx, i);
+			goto out;
+		}
+
 	}
 
 	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
 	ctxdesc = txdesc + ((idx + 1) & (mask));
 	ctxbi = txbi + ((idx + 1) & (mask));
-	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+	ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
 			offset_in_page(skb->data), len, hidma);
+	if (ret)
+		jme_drop_tx_map(jme, idx, i);
+
+out:
+	return ret;
 
 }
 
+
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	struct txdesc *txdesc;
 	struct jme_buffer_info *txbi;
 	u8 flags;
+	int ret = 0;
 
 	txdesc = (struct txdesc *)txring->desc + idx;
 	txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
 		jme_tx_csum(jme, skb, &flags);
 	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-	jme_map_tx_skb(jme, skb, idx);
+	ret = jme_map_tx_skb(jme, skb, idx);
+	if (ret)
+		return ret;
+
 	txdesc->desc1.flags = flags;
 	/*
 	 * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}
 
-	jme_fill_tx_desc(jme, skb, idx);
+	if (jme_fill_tx_desc(jme, skb, idx))
+		return NETDEV_TX_OK;
 
 	jwrite32(jme, JME_TXCS, jme->reg_txcs |
 				TXCS_SELECT_QUEUE0 |
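
The jme changes make every DMA mapping failure visible and add jme_drop_tx_map() to release whatever was already mapped; the general map-then-unwind shape, as a hedged generic sketch (not jme code, helper names are placeholders):

/* Generic sketch: map n pages, and on the i-th failure unmap the i
 * mappings that already succeeded before reporting the error. */
static int demo_map_pages(struct device *dev, struct page **pages,
			  dma_addr_t *daddr, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		daddr[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dev, daddr[i]))
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		dma_unmap_page(dev, daddr[i], PAGE_SIZE, DMA_TO_DEVICE);
	return -ENOMEM;
}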

+ 2 - 2
drivers/net/ethernet/mellanox/mlx4/cmd.c

@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
 	},
 	{
 		.opcode = MLX4_CMD_UPDATE_QP,
-		.has_inbox = false,
+		.has_inbox = true,
 		.has_outbox = false,
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_CMD_EPERM_wrapper
+		.wrapper = mlx4_UPDATE_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_GET_OP_REQ,

+ 6 - 0
drivers/net/ethernet/mellanox/mlx4/mlx4.h

@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 			   struct mlx4_cmd_mailbox *outbox,
 			   struct mlx4_cmd_info *cmd);
 
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+
 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
 			 struct mlx4_vhcr *vhcr,
 			 struct mlx4_cmd_mailbox *inbox,

+ 35 - 0
drivers/net/ethernet/mellanox/mlx4/qp.c

@@ -389,6 +389,41 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+		   enum mlx4_update_qp_attr attr,
+		   struct mlx4_update_qp_params *params)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+		return -EINVAL;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+	cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+		return -EINVAL;
+
+	if (attr & MLX4_UPDATE_QP_SMAC) {
+		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+	}
+
+	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+	err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
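
mlx4_update_qp() (added above) pairs an attribute mask with a params struct and mirrors the mask into primary_addr_path_mask so firmware only applies the selected fields. A hedged caller-side sketch using only the API introduced here (qp and smac_index are placeholders supplied by the caller):

/* Sketch: request an SMAC index update for a QP via the new helper. */
static int demo_set_qp_smac(struct mlx4_dev *dev, struct mlx4_qp *qp,
			    u8 smac_index)
{
	struct mlx4_update_qp_params params = {
		.smac_index = smac_index,
	};

	return mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
}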

+ 54 - 0
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c

@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd_info)
+{
+	int err;
+	u32 qpn = vhcr->in_modifier & 0xffffff;
+	struct res_qp *rqp;
+	u64 mac;
+	unsigned port;
+	u64 pri_addr_path_mask;
+	struct mlx4_update_qp_context *cmd;
+	int smac_index;
+
+	cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+		return -EPERM;
+
+	/* Just change the smac for the QP */
+	err = get_res(dev, slave, qpn, RES_QP, &rqp);
+	if (err) {
+		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+		return err;
+	}
+
+	port = (rqp->sched_queue >> 6 & 1) + 1;
+	smac_index = cmd->qp_context.pri_path.grh_mylmc;
+	err = mac_find_smac_ix_in_slave(dev, slave, port,
+					smac_index, &mac);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+			 qpn, smac_index);
+		goto err_mac;
+	}
+
+	err = mlx4_cmd(dev, inbox->dma,
+		       vhcr->in_modifier, 0,
+		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_NATIVE);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+		goto err_mac;
+	}
+
+err_mac:
+	put_res(dev, slave, qpn, RES_QP);
+	return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_vhcr *vhcr,
 					 struct mlx4_cmd_mailbox *inbox,

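The wrapper's gatekeeping boils down to a whitelist test on the guest-supplied attribute mask; as a standalone predicate (name illustrative):

/* True only if every requested bit is also in the supported set. */
static bool example_mask_supported(u64 requested, u64 supported)
{
	return !(requested & ~supported);
}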
+ 0 - 16
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h

@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
 				tx_ring->producer;
 }

-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-					     struct net_device *netdev)
-{
-	int err;
-
-	netdev->num_tx_queues = adapter->drv_tx_rings;
-	netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-	err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-	if (err)
-		netdev_err(netdev, "failed to set %d Tx queues\n",
-			   adapter->drv_tx_rings);
-
-	return err;
-}
-
 struct qlcnic_nic_template {
 	int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
 	int (*config_led) (struct qlcnic_adapter *, u32, u32);

+ 53 - 4
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c

@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
 	ahw->max_uc_count = count;
 }

+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+				      u8 tx_queues, u8 rx_queues)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	if (tx_queues) {
+		err = netif_set_real_num_tx_queues(netdev, tx_queues);
+		if (err) {
+			netdev_err(netdev, "failed to set %d Tx queues\n",
+				   tx_queues);
+			return err;
+		}
+	}
+
+	if (rx_queues) {
+		err = netif_set_real_num_rx_queues(netdev, rx_queues);
+		if (err)
+			netdev_err(netdev, "failed to set %d Rx queues\n",
+				   rx_queues);
+	}
+
+	return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 		    int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->irq = adapter->msix_entries[0].vector;

-	err = qlcnic_set_real_num_queues(adapter, netdev);
+	err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+					 adapter->drv_sds_rings);
 	if (err)
 		return err;

@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
 			    tx_ring->tx_stats.xmit_called,
 			    tx_ring->tx_stats.xmit_on,
 			    tx_ring->tx_stats.xmit_off);
+
+		if (tx_ring->crb_intr_mask)
+			netdev_info(netdev, "crb_intr_mask=%d\n",
+				    readl(tx_ring->crb_intr_mask));
+
 		netdev_info(netdev,
-			    "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-			    readl(tx_ring->crb_intr_mask),
+			    "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
 			    readl(tx_ring->crb_cmd_producer),
 			    tx_ring->producer, tx_ring->sw_consumer,
 			    le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	u8 tx_rings, rx_rings;
 	int err;

 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EBUSY;

+	tx_rings = adapter->drv_tss_rings;
+	rx_rings = adapter->drv_rss_rings;
+
 	netif_device_detach(netdev);
+
+	err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+	if (err)
+		goto done;
+
 	if (netif_running(netdev))
 		__qlcnic_down(adapter, netdev);

@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 		return err;
 	}

-	netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+	/* Check if we need to update real_num_{tx|rx}_queues because
+	 * qlcnic_setup_intr() may change Tx/Rx rings size
+	 */
+	if ((tx_rings != adapter->drv_tx_rings) ||
+	    (rx_rings != adapter->drv_sds_rings)) {
+		err = qlcnic_set_real_num_queues(adapter,
+						 adapter->drv_tx_rings,
+						 adapter->drv_sds_rings);
+		if (err)
+			goto done;
+	}
 	if (qlcnic_83xx_check(adapter)) {
 		qlcnic_83xx_initialize_nic(adapter, 1);

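The reworked qlcnic helper reduces to the standard pair of real-queue setters; a self-contained sketch of that pattern (function name illustrative, error handling as in the hunk):

static int example_sync_queues(struct net_device *netdev, u8 tx_queues,
			       u8 rx_queues)
{
	int err;

	/* Tell the stack how many Tx queues are actually usable. */
	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err)
		return err;

	/* Likewise for Rx; both setters can fail and must be checked. */
	return netif_set_real_num_rx_queues(netdev, rx_queues);
}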
+ 8 - 6
drivers/net/ethernet/sfc/nic.c

@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
 	efx->net_dev->rx_cpu_rmap = NULL;
 #endif

-	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel(channel, efx)
-		free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-	/* Disable legacy interrupt */
-	if (efx->legacy_irq)
+	if (EFX_INT_MODE_USE_MSI(efx)) {
+		/* Disable MSI/MSI-X interrupts */
+		efx_for_each_channel(channel, efx)
+			free_irq(channel->irq,
+				 &efx->msi_context[channel->channel]);
+	} else {
+		/* Disable legacy interrupt */
 		free_irq(efx->legacy_irq, efx);
+	}
 }

 /* Register dump */

+ 1 - 3
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
 		if (ret) {
 			pr_err("%s: Cannot attach to PHY (error: %d)\n",
 			       __func__, ret);
-			goto phy_error;
+			return ret;
 		}
 	}

@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
-phy_error:
-	clk_disable_unprepare(priv->stmmac_clk);

 	return ret;
 }

+ 1 - 1
drivers/net/ethernet/sun/cassini.c

@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
 	int i;

 	for (i = 0; i < N_TX_RINGS; i++)
-		spin_lock(&cp->tx_lock[i]);
+		spin_lock_nested(&cp->tx_lock[i], i);
 }

 static inline void cas_lock_all(struct cas *cp)

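The cassini fix is the usual cure for taking an array of same-class spinlocks in order: give each acquisition its own lockdep subclass. A standalone sketch of the idiom (ring count and names illustrative):

#include <linux/spinlock.h>

#define EXAMPLE_N_RINGS	4
static spinlock_t example_ring_lock[EXAMPLE_N_RINGS];

static void example_lock_all_rings(void)
{
	int i;

	/* The same lock class is taken N times in a fixed order; a
	 * distinct subclass per slot keeps lockdep from reporting
	 * false recursive locking.
	 */
	for (i = 0; i < EXAMPLE_N_RINGS; i++)
		spin_lock_nested(&example_ring_lock[i], i);
}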
+ 6 - 11
drivers/net/ethernet/ti/cpsw.c

@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
 		phyid = be32_to_cpup(parp+1);
 		mdio = of_find_device_by_node(mdio_node);
-
-		if (strncmp(mdio->name, "gpio", 4) == 0) {
-			/* GPIO bitbang MDIO driver attached */
-			struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-				 PHY_ID_FMT, bus->id, phyid);
-		} else {
-			/* davinci MDIO driver attached */
-			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-				 PHY_ID_FMT, mdio->name, phyid);
+		of_node_put(mdio_node);
+		if (!mdio) {
+			pr_err("Missing mdio platform device\n");
+			return -EINVAL;
 		}
+		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+			 PHY_ID_FMT, mdio->name, phyid);

 		mac_addr = of_get_mac_address(slave_node);
 		if (mac_addr)

+ 14 - 4
drivers/net/macvlan.c

@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;

-	if (change & IFF_ALLMULTI)
-		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	if (dev->flags & IFF_UP) {
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	}
 }

 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
 	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))

+static int macvlan_get_nest_level(struct net_device *dev)
+{
+	return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
 					  struct netdev_queue *txq,
 					  void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,

 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-	lockdep_set_class(&dev->addr_list_lock,
-			  &macvlan_netdev_addr_lock_key);
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &macvlan_netdev_addr_lock_key,
+				       macvlan_get_nest_level(dev));
 	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }

@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
 	.ndo_fdb_add		= macvlan_fdb_add,
 	.ndo_fdb_del		= macvlan_fdb_del,
 	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
+	.ndo_get_lock_subclass  = macvlan_get_nest_level,
 };

 void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	vlan->dev      = dev;
 	vlan->port     = port;
 	vlan->set_features = MACVLAN_FEATURES;
+	vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;

 	vlan->mode     = MACVLAN_MODE_VEPA;
 	if (data && data[IFLA_MACVLAN_MODE])

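The macvlan change pairs a per-device nesting level with lockdep_set_class_and_subclass(); a hedged sketch of that pairing, with the key and calling context assumed:

static struct lock_class_key example_addr_lock_key;

static void example_set_stacked_lockdep(struct net_device *dev,
					int nest_level)
{
	/* Stacked devices of one type share a single lock class; the
	 * subclass records how deep this instance sits in the stack.
	 */
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &example_addr_lock_key,
				       nest_level);
}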
+ 4 - 0
drivers/net/phy/mdio-gpio.c

@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
 	if (pdev->dev.of_node) {
 		pdata = mdio_gpio_of_get_data(pdev);
 		bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+		if (bus_id < 0) {
+			dev_warn(&pdev->dev, "failed to get alias id\n");
+			bus_id = 0;
+		}
 	} else {
 		pdata = dev_get_platdata(&pdev->dev);
 		bus_id = pdev->id;

+ 9 - 7
drivers/net/phy/phy.c

@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
 			container_of(dwork, struct phy_device, state_queue);
-	int needs_aneg = 0, do_suspend = 0;
+	bool needs_aneg = false, do_suspend = false, do_resume = false;
 	int err = 0;

 	mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
 	case PHY_PENDING:
 		break;
 	case PHY_UP:
-		needs_aneg = 1;
+		needs_aneg = true;

 		phydev->link_timeout = PHY_AN_TIMEOUT;

@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->adjust_link(phydev->attached_dev);

 		} else if (0 == phydev->link_timeout--)
-			needs_aneg = 1;
+			needs_aneg = true;
 		break;
 	case PHY_NOLINK:
 		err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
 			netif_carrier_on(phydev->attached_dev);
 		} else {
 			if (0 == phydev->link_timeout--)
-				needs_aneg = 1;
+				needs_aneg = true;
 		}

 		phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->link = 0;
 			netif_carrier_off(phydev->attached_dev);
 			phydev->adjust_link(phydev->attached_dev);
-			do_suspend = 1;
+			do_suspend = true;
 		}
 		break;
 	case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
 			}
 			phydev->adjust_link(phydev->attached_dev);
 		}
+		do_resume = true;
 		break;
 	}

@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)

 	if (needs_aneg)
 		err = phy_start_aneg(phydev);
-
-	if (do_suspend)
+	else if (do_suspend)
 		phy_suspend(phydev);
+	else if (do_resume)
+		phy_resume(phydev);

 	if (err < 0)
 		phy_error(phydev);

+ 2 - 2
drivers/net/phy/phy_device.c

@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	err = phy_init_hw(phydev);
 	if (err)
 		phy_detach(phydev);
-
-	phy_resume(phydev);
+	else
+		phy_resume(phydev);

 	return err;
 }

+ 41 - 16
drivers/net/usb/cdc_mbim.c

@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
 	cdc_ncm_unbind(dev, intf);
 }

+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+	switch (proto) {
+	case htons(ETH_P_IP):
+	case htons(ETH_P_IPV6):
+		return true;
+	}
+	return false;
+}

 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 	struct cdc_ncm_ctx *ctx = info->ctx;
 	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
 	u16 tci = 0;
+	bool is_ip;
 	u8 *c;

 	if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 		if (skb->len <= ETH_HLEN)
 			goto error;

+		/* Some applications using e.g. packet sockets will
+		 * bypass the VLAN acceleration and create tagged
+		 * ethernet frames directly.  We primarily look for
+		 * the accelerated out-of-band tag, but fall back if
+		 * required
+		 */
+		skb_reset_mac_header(skb);
+		if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+		    __vlan_get_tag(skb, &tci) == 0) {
+			is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+			skb_pull(skb, VLAN_ETH_HLEN);
+		} else {
+			is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+			skb_pull(skb, ETH_HLEN);
+		}
+
 		/* mapping VLANs to MBIM sessions:
 		 *   no tag     => IPS session <0>
 		 *   1 - 255    => IPS session <vlanid>
 		 *   256 - 511  => DSS session <vlanid - 256>
 		 *   512 - 4095 => unsupported, drop
 		 */
-		vlan_get_tag(skb, &tci);
-
 		switch (tci & 0x0f00) {
 		case 0x0000: /* VLAN ID 0 - 255 */
-			/* verify that datagram is IPv4 or IPv6 */
-			skb_reset_mac_header(skb);
-			switch (eth_hdr(skb)->h_proto) {
-			case htons(ETH_P_IP):
-			case htons(ETH_P_IPV6):
-				break;
-			default:
+			if (!is_ip)
 				goto error;
-			}
 			c = (u8 *)&sign;
 			c[3] = tci;
 			break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 				  "unsupported tci=0x%04x\n", tci);
 			goto error;
 		}
-		skb_pull(skb, ETH_HLEN);
 	}

 	spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
 		return;

 	/* need to send the NA on the VLAN dev, if any */
-	if (tci)
+	rcu_read_lock();
+	if (tci) {
 		netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
 					      tci);
-	else
+		if (!netdev) {
+			rcu_read_unlock();
+			return;
+		}
+	} else {
 		netdev = dev->net;
-	if (!netdev)
-		return;
+	}
+	dev_hold(netdev);
+	rcu_read_unlock();

 	in6_dev = in6_dev_get(netdev);
 	if (!in6_dev)
-		return;
+		goto out;
 	is_router = !!in6_dev->cnf.forwarding;
 	in6_dev_put(in6_dev);

@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
 				 true /* solicited */,
 				 false /* override */,
 				 true /* inc_opt */);
+out:
+	dev_put(netdev);
 }

 static bool is_neigh_solicit(u8 *buf, size_t len)

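The VLAN-to-session mapping in the tx_fixup hunk condenses to a small lookup; an illustrative, self-contained rendering (helper name and return convention are ours, not the driver's):

#include <linux/errno.h>

static int example_tci_to_mbim_session(u16 tci, bool is_ip)
{
	switch (tci & 0x0f00) {
	case 0x0000:	/* no tag / VLAN 1 - 255: IPS session <tci>, IP only */
		return is_ip ? tci : -EINVAL;
	case 0x0100:	/* VLAN 256 - 511: DSS session <tci - 256> */
		return tci - 256;
	default:	/* VLAN 512 - 4095: unsupported, drop */
		return -EINVAL;
	}
}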
+ 4 - 1
drivers/net/wireless/ath/ath9k/htc_drv_main.c

@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)

 	if ((vif->type == NL80211_IFTYPE_AP ||
 	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
-	    bss_conf->enable_beacon)
+	    bss_conf->enable_beacon) {
 		priv->reconfig_beacon = true;
+		priv->rearm_ani = true;
+	}

 	if (bss_conf->assoc) {
 		priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,

 	ath9k_htc_ps_wakeup(priv);

+	ath9k_htc_stop_ani(priv);
 	del_timer_sync(&priv->tx.cleanup_timer);
 	ath9k_htc_tx_drain(priv);


+ 1 - 1
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c

@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
 	if (!err) {
 		/* only set 2G bandwidth using bw_cap command */
 		band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
 		err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
 					       sizeof(band_bwcap));
 	} else {

+ 3 - 3
drivers/net/wireless/iwlwifi/mvm/coex.c

@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);

 	if (IWL_MVM_BT_COEX_CORUNNING) {
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-						    BT_VALID_CORUN_LUT_40);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+						     BT_VALID_CORUN_LUT_40);
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
 	}

 	if (IWL_MVM_BT_COEX_MPLUT) {
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
 	}

 	if (mvm->cfg->bt_shared_single_ant)

+ 4 - 4
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h

@@ -183,9 +183,9 @@ enum iwl_scan_type {
  *	this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *	bits 0-19: beacon interal in usecs (suspend before executing)
+ *	bits 0-19: beacon interval in TUs (suspend before executing)
  *	bits 20-23: reserved
  *	bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
  * @quiet_plcp_th:	quiet channel num of packets threshold
  * @good_CRC_th:	passive to active promotion threshold
  * @rx_chain:		RXON rx chain.
- * @max_out_time:	max uSec to be out of assoceated channel
- * @suspend_time:	pause scan this long when returning to service channel
+ * @max_out_time:	max TUs to be out of associated channel
+ * @suspend_time:	pause scan for this many TUs when returning to service channel
  * @flags:		RXON flags
  * @filter_flags:	RXONfilter
  * @tx_cmd:		tx command for active scan; for 2GHz and for 5GHz.

+ 7 - 2
drivers/net/wireless/iwlwifi/mvm/mac80211.c

@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);

-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
 	if (ret)
 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
 		return;

-	ieee80211_iterate_active_interfaces(
+	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,

 	mutex_lock(&mvm->mutex);

+	if (!iwl_mvm_is_idle(mvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	switch (mvm->scan_status) {
 	case IWL_MVM_SCAN_OS:
 		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");

+ 3 - 0
drivers/net/wireless/iwlwifi/mvm/mvm.h

@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 	return mvmvif->low_latency;
 }

+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);

+ 1 - 1
drivers/net/wireless/iwlwifi/mvm/rs.c

@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		return;
 	}

-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
 	/* Disable last tx check if we are debugging with fixed rate */
 	if (lq_sta->dbg_fixed_rate) {
 		IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");

+ 13 - 42
drivers/net/wireless/iwlwifi/mvm/scan.c

@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_bound);
-	/*
-	 * Under low latency traffic passive scan is fragmented meaning
-	 * that dwell on a particular channel will be fragmented. Each fragment
-	 * dwell time is 20ms and fragments period is 105ms. Skipping to next
-	 * channel will be delayed by the same period - 105ms. So suspend_time
-	 * parameter describing both fragments and channels skipping periods is
-	 * set to 105ms. This value is chosen so that overall passive scan
-	 * duration will not be too long. Max_out_time in this case is set to
-	 * 70ms, so for active scanning operating channel will be left for 70ms
-	 * while for passive still for 20ms (fragment dwell).
-	 */
-	if (global_bound) {
-		if (!iwl_mvm_low_latency(mvm)) {
-			params->suspend_time = ieee80211_tu_to_usec(100);
-			params->max_out_time = ieee80211_tu_to_usec(600);
-		} else {
-			params->suspend_time = ieee80211_tu_to_usec(105);
-			/* P2P doesn't support fragmented passive scan, so
-			 * configure max_out_time to be at least longest dwell
-			 * time for passive scan.
-			 */
-			if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-				params->max_out_time = ieee80211_tu_to_usec(70);
-				params->passive_fragmented = true;
-			} else {
-				u32 passive_dwell;
-				/*
-				 * Use band G so that passive channel dwell time
-				 * will be assigned with maximum value.
-				 */
-				band = IEEE80211_BAND_2GHZ;
-				passive_dwell = iwl_mvm_get_passive_dwell(band);
-				params->max_out_time =
-					ieee80211_tu_to_usec(passive_dwell);
-			}
-		}
+	if (!global_bound)
+		goto not_bound;
+
+	params->suspend_time = 100;
+	params->max_out_time = 600;
+
+	if (iwl_mvm_low_latency(mvm)) {
+		params->suspend_time = 250;
+		params->max_out_time = 250;
 	}

+not_bound:
+
 	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-		if (params->passive_fragmented)
-			params->dwell[band].passive = 20;
-		else
-			params->dwell[band].passive =
-				iwl_mvm_get_passive_dwell(band);
+		params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
 		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
 								      n_ssids);
 	}
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
 	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
 	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
 	int head = 0;
-	int tail = band_2ghz + band_5ghz;
+	int tail = band_2ghz + band_5ghz - 1;
 	u32 ssid_bitmap;
 	int cmd_len;
 	int ret;

+ 19 - 0
drivers/net/wireless/iwlwifi/mvm/utils.c

@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)

 	return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+	bool *idle = _data;
+
+	if (!vif->bss_conf.idle)
+		*idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+	bool idle = true;
+
+	ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_idle_iter, &idle);
+
+	return idle;
+}

+ 6 - 4
drivers/net/wireless/iwlwifi/pcie/trans.c

@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

+	trans->dev = &pdev->dev;
+	trans_pcie->pci_dev = pdev;
+	iwl_disable_interrupts(trans);
+
 	err = pci_enable_msi(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	}

-	trans->dev = &pdev->dev;
-	trans_pcie->pci_dev = pdev;
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_pci_disable_msi;
 	}

-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;

@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_free_ict;
 	}

+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
 	return trans;

 out_free_ict:

+ 1 - 1
drivers/net/xen-netback/common.h

@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 			      grant_ref_t rx_ring_ref);

 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);

 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);

+ 3 - 27
drivers/net/xen-netback/interface.c

@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 	work_done = xenvif_tx_action(vif, budget);

 	if (work_done < budget) {
-		int more_to_do = 0;
-		unsigned long flags;
-
-		/* It is necessary to disable IRQ before calling
-		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-		 * lose event from the frontend.
-		 *
-		 * Consider:
-		 *   RING_HAS_UNCONSUMED_REQUESTS
-		 *   <frontend generates event to trigger napi_schedule>
-		 *   __napi_complete
-		 *
-		 * This handler is still in scheduled state so the
-		 * event has no effect at all. After __napi_complete
-		 * this handler is descheduled and cannot get
-		 * scheduled again. We lose event in this case and the ring
-		 * will be completely stalled.
-		 */
-
-		local_irq_save(flags);
-
-		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-		if (!more_to_do)
-			__napi_complete(napi);
-
-		local_irq_restore(flags);
+		napi_complete(napi);
+		xenvif_napi_schedule_or_enable_events(vif);
 	}

 	return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
 	enable_irq(vif->tx_irq);
 	if (vif->tx_irq != vif->rx_irq)
 		enable_irq(vif->rx_irq);
-	xenvif_check_rx_xenvif(vif);
+	xenvif_napi_schedule_or_enable_events(vif);
 }

 static void xenvif_down(struct xenvif *vif)

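The interface.c hunk swaps the hand-rolled IRQ dance for the standard "complete, then re-check" NAPI shape; condensed here, with the xenvif helpers taken from the hunks above and the napi member assumed:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct xenvif *vif = container_of(napi, struct xenvif, napi);
	int work_done = xenvif_tx_action(vif, budget);

	if (work_done < budget) {
		/* Done for now; if a request raced in after the last
		 * check, the helper re-schedules us rather than losing
		 * the event.
		 */
		napi_complete(napi);
		xenvif_napi_schedule_or_enable_events(vif);
	}

	return work_done;
}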
+ 82 - 20
drivers/net/xen-netback/netback.c

@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,

 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
 {
 	u16 pending_idx = ubuf->desc;
 	struct pending_tx_info *temp =
@@ -322,6 +322,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 	}
 }

+/*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page was found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+						const int i,
+						const struct ubuf_info *ubuf)
+{
+	struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+	do {
+		u16 pending_idx = ubuf->desc;
+
+		if (skb_shinfo(skb)->frags[i].page.p ==
+		    foreign_vif->mmap_pages[pending_idx])
+			break;
+		ubuf = (struct ubuf_info *) ubuf->ctx;
+	} while (ubuf);
+
+	return ubuf;
+}
+
 /*
  * Prepare an SKB to be transmitted to the frontend.
  *
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int head = 1;
 	int old_meta_prod;
 	int gso_type;
-	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-	grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
-	struct xenvif *foreign_vif = NULL;
+	const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+	const struct ubuf_info *const head_ubuf = ubuf;

 	old_meta_prod = npo->meta_prod;

@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	npo->copy_off = 0;
 	npo->copy_gref = req->gref;

-	if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-		 (ubuf->callback == &xenvif_zerocopy_callback)) {
-		int i = 0;
-		foreign_vif = ubuf_to_vif(ubuf);
-
-		do {
-			u16 pending_idx = ubuf->desc;
-			foreign_grefs[i++] =
-				foreign_vif->pending_tx_info[pending_idx].req.gref;
-			ubuf = (struct ubuf_info *) ubuf->ctx;
-		} while (ubuf);
-	}
-
 	data = skb->data;
 	while (data < skb_tail_pointer(skb)) {
 		unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	}

 	for (i = 0; i < nr_frags; i++) {
+		/* This variable also signals whether foreign_gref has a real
+		 * value or not.
+		 */
+		struct xenvif *foreign_vif = NULL;
+		grant_ref_t foreign_gref;
+
+		if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+			(ubuf->callback == &xenvif_zerocopy_callback)) {
+			const struct ubuf_info *const startpoint = ubuf;
+
+			/* Ideally ubuf points to the chain element which
+			 * belongs to this frag. Or if frags were removed from
+			 * the beginning, then shortly before it.
+			 */
+			ubuf = xenvif_find_gref(skb, i, ubuf);
+
+			/* Try again from the beginning of the list, if we
+			 * haven't tried from there. This only makes sense in
+			 * the unlikely event of reordering the original frags.
+			 * For injected local pages it's an unnecessary second
+			 * run.
+			 */
+			if (unlikely(!ubuf) && startpoint != head_ubuf)
+				ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+			if (likely(ubuf)) {
+				u16 pending_idx = ubuf->desc;
+
+				foreign_vif = ubuf_to_vif(ubuf);
+				foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+				/* Just a safety measure. If this was the last
+				 * element on the list, the for loop will
+				 * iterate again if a local page were added to
+				 * the end. Using head_ubuf here prevents the
+				 * second search on the chain. Or the original
+				 * frags changed order, but that's less likely.
+				 * In any way, ubuf shouldn't be NULL.
+				 */
+				ubuf = ubuf->ctx ?
+					(struct ubuf_info *) ubuf->ctx :
+					head_ubuf;
+			} else
+				/* This frag was a local page, added to the
+				 * array after the skb left netback.
+				 */
+				ubuf = head_ubuf;
+		}
 		xenvif_gop_frag_copy(vif, skb, npo,
 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				     skb_shinfo(skb)->frags[i].page_offset,
 				     &head,
 				     foreign_vif,
-				     foreign_grefs[i]);
+				     foreign_vif ? foreign_gref : UINT_MAX);
 	}

 	return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
 		notify_remote_via_irq(vif->rx_irq);
 }

-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
 {
 	int more_to_do;

@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
 {
 	struct xenvif *vif = (struct xenvif *)data;
 	tx_add_credit(vif);
-	xenvif_check_rx_xenvif(vif);
+	xenvif_napi_schedule_or_enable_events(vif);
 }

 static void xenvif_tx_err(struct xenvif *vif,

+ 2 - 1
drivers/ptp/Kconfig

@@ -6,6 +6,7 @@ menu "PTP clock support"

 config PTP_1588_CLOCK
 	tristate "PTP clock support"
+	depends on NET
 	select PPS
 	select NET_PTP_CLASSIFY
 	help
@@ -74,7 +75,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
 	tristate "Intel PCH EG20T as PTP clock"
 	depends on X86 || COMPILE_TEST
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && NET
 	select PTP_1588_CLOCK
 	help
 	  This driver adds support for using the PCH EG20T as a PTP

+ 1 - 2
drivers/scsi/scsi_transport_sas.c

@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
 	list_del(&rphy->list);
 	mutex_unlock(&sas_host->lock);

-	sas_bsg_remove(shost, rphy);
-
 	transport_destroy_device(dev);

 	put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
 	}

 	sas_rphy_unlink(rphy);
+	sas_bsg_remove(NULL, rphy);
 	transport_remove_device(dev);
 	device_del(dev);
 }

+ 19 - 0
fs/afs/cmservice.c

@@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call)
 {
 	_enter("");

+	/* Break the callbacks here so that we do it after the final ACK is
+	 * received.  The step number here must match the final number in
+	 * afs_deliver_cb_callback().
+	 */
+	if (call->unmarshall == 6) {
+		ASSERT(call->server && call->count && call->request);
+		afs_break_callbacks(call->server, call->count, call->request);
+	}
+
 	afs_put_server(call->server);
 	call->server = NULL;
 	kfree(call->buffer);
@@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
 		_debug("trailer");
 		if (skb->len != 0)
 			return -EBADMSG;
+
+		/* Record that the message was unmarshalled successfully so
+		 * that the call destructor knows to do the callback breaking
+		 * work, even if the final ACK isn't received.
+		 *
+		 * If the step number changes, then afs_cm_destructor() must be
+		 * updated also.
+		 */
+		call->unmarshall++;
+	case 6:
 		break;
 	}


+ 1 - 1
fs/afs/internal.h

@@ -75,7 +75,7 @@ struct afs_call {
 	const struct afs_call_type *type;	/* type of call */
 	const struct afs_wait_mode *wait_mode;	/* completion wait mode */
 	wait_queue_head_t	waitq;		/* processes awaiting completion */
-	work_func_t		async_workfn;
+	void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
 	struct work_struct	async_work;	/* asynchronous work processor */
 	struct work_struct	work;		/* actual work processor */
 	struct sk_buff_head	rx_queue;	/* received packets */

+ 43 - 43
fs/afs/rxrpc.c

@@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *);
 static int afs_wait_for_call_to_complete(struct afs_call *);
 static void afs_wake_up_async_call(struct afs_call *);
 static int afs_dont_wait_for_call_to_complete(struct afs_call *);
-static void afs_process_async_call(struct work_struct *);
+static void afs_process_async_call(struct afs_call *);
 static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
 static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);

@@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *);
 static struct sk_buff_head afs_incoming_calls;
 static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);

+static void afs_async_workfn(struct work_struct *work)
+{
+	struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+	call->async_workfn(call);
+}
+
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -183,6 +190,28 @@ static void afs_free_call(struct afs_call *call)
 	kfree(call);
 }

+/*
+ * End a call but do not free it
+ */
+static void afs_end_call_nofree(struct afs_call *call)
+{
+	if (call->rxcall) {
+		rxrpc_kernel_end_call(call->rxcall);
+		call->rxcall = NULL;
+	}
+	if (call->type->destructor)
+		call->type->destructor(call);
+}
+
+/*
+ * End a call and free it
+ */
+static void afs_end_call(struct afs_call *call)
+{
+	afs_end_call_nofree(call);
+	afs_free_call(call);
+}
+
 /*
  * allocate a call with flat request and reply buffers
  */
@@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	       atomic_read(&afs_outstanding_calls));

 	call->wait_mode = wait_mode;
-	INIT_WORK(&call->async_work, afs_process_async_call);
+	call->async_workfn = afs_process_async_call;
+	INIT_WORK(&call->async_work, afs_async_workfn);

 	memset(&srx, 0, sizeof(srx));
 	srx.srx_family = AF_RXRPC;
@@ -383,11 +413,8 @@ error_do_abort:
 	rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
 	while ((skb = skb_dequeue(&call->rx_queue)))
 		afs_free_skb(skb);
-	rxrpc_kernel_end_call(rxcall);
-	call->rxcall = NULL;
 error_kill_call:
-	call->type->destructor(call);
-	afs_free_call(call);
+	afs_end_call(call);
 	_leave(" = %d", ret);
 	return ret;
 }
@@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call)
 	if (call->state >= AFS_CALL_COMPLETE) {
 		while ((skb = skb_dequeue(&call->rx_queue)))
 			afs_free_skb(skb);
-		if (call->incoming) {
-			rxrpc_kernel_end_call(call->rxcall);
-			call->rxcall = NULL;
-			call->type->destructor(call);
-			afs_free_call(call);
-		}
+		if (call->incoming)
+			afs_end_call(call);
 	}

 	_leave("");
@@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
 	}

 	_debug("call complete");
-	rxrpc_kernel_end_call(call->rxcall);
-	call->rxcall = NULL;
-	call->type->destructor(call);
-	afs_free_call(call);
+	afs_end_call(call);
 	_leave(" = %d", ret);
 	return ret;
 }
@@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
 /*
  * delete an asynchronous call
  */
-static void afs_delete_async_call(struct work_struct *work)
+static void afs_delete_async_call(struct afs_call *call)
 {
-	struct afs_call *call =
-		container_of(work, struct afs_call, async_work);
-
 	_enter("");

 	afs_free_call(call);
@@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work)
  * - on a multiple-thread workqueue this work item may try to run on several
  *   CPUs at the same time
  */
-static void afs_process_async_call(struct work_struct *work)
+static void afs_process_async_call(struct afs_call *call)
 {
-	struct afs_call *call =
-		container_of(work, struct afs_call, async_work);
-
 	_enter("");

 	if (!skb_queue_empty(&call->rx_queue))
@@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work)
 		call->reply = NULL;

 		/* kill the call */
-		rxrpc_kernel_end_call(call->rxcall);
-		call->rxcall = NULL;
-		if (call->type->destructor)
-			call->type->destructor(call);
+		afs_end_call_nofree(call);

 		/* we can't just delete the call because the work item may be
 		 * queued */
@@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
 	call->reply_size += len;
 }

-static void afs_async_workfn(struct work_struct *work)
-{
-	struct afs_call *call = container_of(work, struct afs_call, async_work);
-
-	call->async_workfn(work);
-}
-
 /*
  * accept the backlog of incoming calls
  */
@@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call)
 		_debug("oom");
 		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
 	default:
-		rxrpc_kernel_end_call(call->rxcall);
-		call->rxcall = NULL;
-		call->type->destructor(call);
-		afs_free_call(call);
+		afs_end_call(call);
 		_leave(" [error]");
 		return;
 	}
@@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 	call->state = AFS_CALL_AWAIT_ACK;
 	n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
 	if (n >= 0) {
+		/* Success */
 		_leave(" [replied]");
 		return;
 	}
+
 	if (n == -ENOMEM) {
 		_debug("oom");
 		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
 	}
-	rxrpc_kernel_end_call(call->rxcall);
-	call->rxcall = NULL;
-	call->type->destructor(call);
-	afs_free_call(call);
+	afs_end_call(call);
 	_leave(" [error]");
 }


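The rxrpc.c rework routes every async work item through a single trampoline that dispatches via a per-call function pointer; the general shape, reduced to a self-contained sketch (struct and names illustrative):

#include <linux/workqueue.h>

struct example_call {
	struct work_struct	async_work;
	void (*async_workfn)(struct example_call *call);
};

static void example_async_trampoline(struct work_struct *work)
{
	struct example_call *call =
		container_of(work, struct example_call, async_work);

	/* One work handler serves all calls; behaviour is selected by
	 * the pointer set before the work was queued.
	 */
	call->async_workfn(call);
}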
+ 1 - 1
fs/nfsd/nfs4acl.c

@@ -590,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
 		add_to_mask(state, &state->groups->aces[i].perms);
 	}

-	if (!state->users->n && !state->groups->n) {
+	if (state->users->n || state->groups->n) {
 		pace++;
 		pace->e_tag = ACL_MASK;
 		low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);

+ 13 - 2
fs/nfsd/nfs4state.c

@@ -3717,9 +3717,16 @@ out:
 static __be32
 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-	if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
+	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
+
+	if (check_for_locks(stp->st_file, lo))
 		return nfserr_locks_held;
-	release_lock_stateid(stp);
+	/*
+	 * Currently there's a 1-1 lock stateid<->lockowner
+	 * correspondence, and we have to delete the lockowner when we
+	 * delete the lock stateid:
+	 */
+	unhash_lockowner(lo);
 	return nfs_ok;
 }

@@ -4159,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c

 	if (!same_owner_str(&lo->lo_owner, owner, clid))
 		return false;
+	if (list_empty(&lo->lo_owner.so_stateids)) {
+		WARN_ON_ONCE(1);
+		return false;
+	}
 	lst = list_first_entry(&lo->lo_owner.so_stateids,
 			       struct nfs4_ol_stateid, st_perstateowner);
 	return lst->st_file->fi_inode == inode;

Some files were not shown because too many files changed in this diff