
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "This set fixes a bunch of fallout from the changes that went in during
  this merge window, particularly:

   - Fix fsl_pq_mdio (Claudiu Manoil) and fm10k (Pranith Kumar) build
     failures.

   - Several networking drivers do atomic_set() on page counts where
     that's not exactly legal.  From Eric Dumazet.

   - Make __skb_flow_get_ports() work cleanly with unaligned data, from
     Alexander Duyck.

   - Fix some kernel-doc buglets in rfkill and netlabel, from Fabian
     Frederick.

   - Unbalanced enable_irq_wake usage in bcmgenet and systemport
     drivers, from Florian Fainelli.

   - pxa168_eth needs to depend on HAS_DMA, from Geert Uytterhoeven.

   - Multi-dequeue in the qdisc layer severely bypasses the fairness
     limits the previous code used to enforce, reintroduce in a way that
     at the same time doesn't compromise bulk dequeue opportunities.
     From Jesper Dangaard Brouer.

   - macvlan receive path unnecessarily hops through a softirq by using
     netif_rx() instead of netif_receive_skb().  From Jason Baron"
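The "unbalanced enable_irq_wake" item above refers to drivers calling enable_irq_wake() on every WoL configuration request, even when the wake IRQ was already enabled, which leaves the IRQ wake reference count unbalanced. The bcmsysport and bcmgenet hunks below fix this by only enabling wake-up when it is currently disabled. A minimal illustrative sketch of that guarded pattern (the example_wol_priv/example_set_wol names are hypothetical, not the actual driver code):

#include <linux/interrupt.h>

struct example_wol_priv {
	int wol_irq;			/* wake-up interrupt line */
	bool wol_irq_disabled;		/* true while no wake reference is held */
};

static void example_set_wol(struct example_wol_priv *priv, bool enable)
{
	if (enable) {
		/* take the wake reference only if we do not already hold one */
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = false;
	} else {
		/* drop the reference only if we actually hold one */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = true;
	}
}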

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (51 commits)
  net: systemport: avoid unbalanced enable_irq_wake calls
  net: bcmgenet: avoid unbalanced enable_irq_wake calls
  net: bcmgenet: fix off-by-one in incrementing read pointer
  net: fix races in page->_count manipulation
  mlx4: fix race accessing page->_count
  ixgbe: fix race accessing page->_count
  igb: fix race accessing page->_count
  fm10k: fix race accessing page->_count
  net/phy: micrel: Add clock support for KSZ8021/KSZ8031
  flow-dissector: Fix alignment issue in __skb_flow_get_ports
  net: filter: fix the comments
  Documentation: replace __sk_run_filter with __bpf_prog_run
  macvlan: optimize the receive path
  macvlan: pass 'bool' type to macvlan_count_rx()
  drivers: net: xgene: Add 10GbE ethtool support
  drivers: net: xgene: Add 10GbE support
  drivers: net: xgene: Preparing for adding 10GbE support
  dtb: Add 10GbE node to APM X-Gene SoC device tree
  Documentation: dts: Update section header for APM X-Gene
  MAINTAINERS: Update APM X-Gene section
  ...
Linus Torvalds, 10 years ago
Commit ca321885b0
62 changed files, with 1017 additions and 447 deletions
  1. Documentation/devicetree/bindings/net/apm-xgene-enet.txt (+3, -1)
  2. Documentation/devicetree/bindings/net/micrel.txt (+6, -0)
  3. Documentation/networking/filter.txt (+2, -2)
  4. MAINTAINERS (+0, -1)
  5. arch/arm64/boot/dts/apm-mustang.dts (+4, -0)
  6. arch/arm64/boot/dts/apm-storm.dtsi (+27, -2)
  7. drivers/net/ethernet/apm/xgene/Makefile (+2, -1)
  8. drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c (+22, -6)
  9. drivers/net/ethernet/apm/xgene/xgene_enet_hw.c (+30, -14)
  10. drivers/net/ethernet/apm/xgene/xgene_enet_hw.h (+8, -22)
  11. drivers/net/ethernet/apm/xgene/xgene_enet_main.c (+61, -25)
  12. drivers/net/ethernet/apm/xgene/xgene_enet_main.h (+21, -3)
  13. drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c (+331, -0)
  14. drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h (+57, -0)
  15. drivers/net/ethernet/broadcom/bcmsysport.c (+2, -1)
  16. drivers/net/ethernet/broadcom/genet/bcmgenet.c (+4, -5)
  17. drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c (+3, -1)
  18. drivers/net/ethernet/chelsio/cxgb4/cxgb4.h (+1, -1)
  19. drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c (+5, -1)
  20. drivers/net/ethernet/chelsio/cxgb4/sge.c (+4, -1)
  21. drivers/net/ethernet/chelsio/cxgb4/t4_hw.c (+10, -7)
  22. drivers/net/ethernet/chelsio/cxgb4/t4_msg.h (+1, -0)
  23. drivers/net/ethernet/chelsio/cxgb4/t4_regs.h (+2, -3)
  24. drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c (+8, -4)
  25. drivers/net/ethernet/chelsio/cxgb4vf/sge.c (+4, -1)
  26. drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h (+6, -0)
  27. drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c (+7, -3)
  28. drivers/net/ethernet/freescale/fs_enet/mac-fcc.c (+1, -1)
  29. drivers/net/ethernet/freescale/fs_enet/mac-scc.c (+1, -1)
  30. drivers/net/ethernet/freescale/fsl_pq_mdio.c (+34, -22)
  31. drivers/net/ethernet/freescale/gianfar.c (+37, -31)
  32. drivers/net/ethernet/freescale/gianfar.h (+31, -0)
  33. drivers/net/ethernet/intel/Kconfig (+1, -0)
  34. drivers/net/ethernet/intel/fm10k/fm10k_main.c (+3, -4)
  35. drivers/net/ethernet/intel/igb/igb_main.c (+3, -4)
  36. drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (+3, -5)
  37. drivers/net/ethernet/marvell/Kconfig (+2, -1)
  38. drivers/net/ethernet/mellanox/mlx4/en_rx.c (+3, -3)
  39. drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c (+2, -2)
  40. drivers/net/macvlan.c (+13, -8)
  41. drivers/net/phy/micrel.c (+29, -2)
  42. drivers/net/usb/r8152.c (+83, -15)
  43. drivers/net/wireless/ath/ath9k/ath9k.h (+3, -1)
  44. drivers/net/wireless/ath/ath9k/beacon.c (+2, -10)
  45. drivers/net/wireless/ath/ath9k/htc_drv_init.c (+1, -0)
  46. drivers/net/wireless/ath/ath9k/main.c (+1, -1)
  47. drivers/net/wireless/ath/ath9k/tx99.c (+7, -1)
  48. drivers/net/wireless/ath/ath9k/xmit.c (+23, -11)
  49. drivers/net/wireless/ath/main.c (+4, -4)
  50. drivers/net/wireless/rtl818x/rtl8180/dev.c (+25, -11)
  51. drivers/net/wireless/rtlwifi/wifi.h (+1, -1)
  52. include/linux/micrel_phy.h (+1, -0)
  53. include/net/netfilter/ipv6/nf_reject.h (+2, -155)
  54. include/uapi/linux/netfilter/nf_tables.h (+1, -1)
  55. net/Kconfig (+1, -0)
  56. net/core/filter.c (+3, -6)
  57. net/core/flow_dissector.c (+23, -13)
  58. net/core/skbuff.c (+23, -12)
  59. net/netfilter/nft_reject.c (+4, -6)
  60. net/netlabel/netlabel_kapi.c (+0, -1)
  61. net/rfkill/core.c (+2, -2)
  62. net/sched/sch_generic.c (+13, -7)

+ 3 - 1
Documentation/devicetree/bindings/net/apm-xgene-enet.txt

@@ -3,7 +3,7 @@ APM X-Gene SoC Ethernet nodes
 Ethernet nodes are defined to describe on-chip ethernet interfaces in
 APM X-Gene SoC.
 
-Required properties:
+Required properties for all the ethernet interfaces:
 - compatible: Should be "apm,xgene-enet"
 - reg: Address and length of the register set for the device. It contains the
   information of registers in the same order as described by reg-names
@@ -15,6 +15,8 @@ Required properties:
 - clocks: Reference to the clock entry.
 - local-mac-address: MAC address assigned to this device
 - phy-connection-type: Interface type between ethernet device and PHY device
+
+Required properties for ethernet interfaces that have external PHY:
 - phy-handle: Reference to a PHY node connected to this device
 
 - mdio: Device tree subnode with the following required properties:

+ 6 - 0
Documentation/devicetree/bindings/net/micrel.txt

@@ -16,3 +16,9 @@ Optional properties:
 	      KSZ8051: register 0x1f, bits 5..4
 
               See the respective PHY datasheet for the mode values.
+
+ - clocks, clock-names: contains clocks according to the common clock bindings.
+
+              supported clocks:
+	      - KSZ8021, KSZ8031: "rmii-ref": The RMII refence input clock. Used
+		to determine the XI input clock.

+ 2 - 2
Documentation/networking/filter.txt

@@ -700,11 +700,11 @@ Some core changes of the new internal format:
     bpf_exit
 
   If f2 is JITed and the pointer stored to '_f2'. The calls f1 -> f2 -> f3 and
-  returns will be seamless. Without JIT, __sk_run_filter() interpreter needs to
+  returns will be seamless. Without JIT, __bpf_prog_run() interpreter needs to
   be used to call into f2.
 
   For practical reasons all eBPF programs have only one argument 'ctx' which is
-  already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
+  already placed into R1 (e.g. on __bpf_prog_run() startup) and the programs
   can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
   are currently not supported, but these restrictions can be lifted if necessary
   in the future.

+ 0 - 1
MAINTAINERS

@@ -734,7 +734,6 @@ F:	net/appletalk/
 APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
 M:	Iyappan Subramanian <isubramanian@apm.com>
 M:	Keyur Chudgar <kchudgar@apm.com>
-M:	Ravi Patel <rapatel@apm.com>
 S:	Supported
 F:	drivers/net/ethernet/apm/xgene/
 F:	Documentation/devicetree/bindings/net/apm-xgene-enet.txt

+ 4 - 0
arch/arm64/boot/dts/apm-mustang.dts

@@ -40,3 +40,7 @@
 &menet {
 	status = "ok";
 };
+
+&xgenet {
+	status = "ok";
+};

+ 27 - 2
arch/arm64/boot/dts/apm-storm.dtsi

@@ -176,6 +176,16 @@
 				clock-output-names = "menetclk";
 			};
 
+			xge0clk: xge0clk@1f61c000 {
+				compatible = "apm,xgene-device-clock";
+				#clock-cells = <1>;
+				clocks = <&socplldiv2 0>;
+				reg = <0x0 0x1f61c000 0x0 0x1000>;
+				reg-names = "csr-reg";
+				csr-mask = <0x3>;
+				clock-output-names = "xge0clk";
+			};
+
 			sataphy1clk: sataphy1clk@1f21c000 {
 				compatible = "apm,xgene-device-clock";
 				#clock-cells = <1>;
@@ -585,7 +595,8 @@
 			interrupts = <0x0 0x3c 0x4>;
 			dma-coherent;
 			clocks = <&menetclk 0>;
-			local-mac-address = [00 01 73 00 00 01];
+			/* mac address will be overwritten by the bootloader */
+			local-mac-address = [00 00 00 00 00 00];
 			phy-connection-type = "rgmii";
 			phy-handle = <&menetphy>;
 			mdio {
@@ -600,12 +611,26 @@
 			};
 		};
 
+		xgenet: ethernet@1f610000 {
+			compatible = "apm,xgene-enet";
+			status = "disabled";
+			reg = <0x0 0x1f610000 0x0 0xd100>,
+			      <0x0 0x1f600000 0x0 0X400>,
+			      <0x0 0x18000000 0x0 0X200>;
+			reg-names = "enet_csr", "ring_csr", "ring_cmd";
+			interrupts = <0x0 0x60 0x4>;
+			dma-coherent;
+			clocks = <&xge0clk 0>;
+			/* mac address will be overwritten by the bootloader */
+			local-mac-address = [00 00 00 00 00 00];
+			phy-connection-type = "xgmii";
+		};
+
 		rng: rng@10520000 {
 			compatible = "apm,xgene-rng";
 			reg = <0x0 0x10520000 0x0 0x100>;
 			interrupts = <0x0 0x41 0x4>;
 			clocks = <&rngpkaclk 0>;
 		};
-
 	};
 };

+ 2 - 1
drivers/net/ethernet/apm/xgene/Makefile

@@ -2,5 +2,6 @@
 # Makefile for APM X-Gene Ethernet Driver.
 #
 
-xgene-enet-objs := xgene_enet_hw.o xgene_enet_main.o xgene_enet_ethtool.o
+xgene-enet-objs := xgene_enet_hw.o xgene_enet_xgmac.o \
+		   xgene_enet_main.o xgene_enet_ethtool.o
 obj-$(CONFIG_NET_XGENE) += xgene-enet.o

+ 22 - 6
drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c

@@ -59,10 +59,22 @@ static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct phy_device *phydev = pdata->phy_dev;
 
-	if (phydev == NULL)
-		return -ENODEV;
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+		if (phydev == NULL)
+			return -ENODEV;
 
-	return phy_ethtool_gset(phydev, cmd);
+	return phy_ethtool_gset(phydev, cmd);
+	}
+
+	cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
+	cmd->advertising = cmd->supported;
+	ethtool_cmd_speed_set(cmd, SPEED_10000);
+	cmd->duplex = DUPLEX_FULL;
+	cmd->port = PORT_FIBRE;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = AUTONEG_DISABLE;
+
+	return 0;
 }
 
 static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
@@ -70,10 +82,14 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct phy_device *phydev = pdata->phy_dev;
 
-	if (phydev == NULL)
-		return -ENODEV;
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+		if (phydev == NULL)
+			return -ENODEV;
+
+		return phy_ethtool_sset(phydev, cmd);
+	}
 
-	return phy_ethtool_sset(phydev, cmd);
+	return -EINVAL;
 }
 
 static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data)

+ 30 - 14
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c

@@ -402,7 +402,7 @@ static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
 	return data;
 }
 
-void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
 {
 	u32 addr0, addr1;
 	u8 *dev_addr = pdata->ndev->dev_addr;
@@ -436,13 +436,13 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
 	return 0;
 }
 
-void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
 {
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
 }
 
-void xgene_gmac_init(struct xgene_enet_pdata *pdata, int speed)
+static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
 {
 	u32 value, mc2;
 	u32 intf_ctl, rgmii;
@@ -456,7 +456,7 @@ void xgene_gmac_init(struct xgene_enet_pdata *pdata, int speed)
 	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
 	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);
 
-	switch (speed) {
+	switch (pdata->phy_speed) {
 	case SPEED_10:
 		ENET_INTERFACE_MODE2_SET(&mc2, 1);
 		CFG_MACMODE_SET(&icm0, 0);
@@ -525,8 +525,8 @@ static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
 }
 
-void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
-			   u32 dst_ring_num, u16 bufpool_id)
+static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
+				  u32 dst_ring_num, u16 bufpool_id)
 {
 	u32 cb;
 	u32 fpsel;
@@ -544,7 +544,7 @@ void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
 	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
 }
 
-void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
 
@@ -552,7 +552,7 @@ void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
 }
 
-void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
 
@@ -560,7 +560,7 @@ void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
 }
 
-void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
 
@@ -568,7 +568,7 @@ void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
 }
 
-void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
 
@@ -576,7 +576,7 @@ void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
 }
 
-void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
 	u32 val;
 
@@ -593,7 +593,7 @@ void xgene_enet_reset(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
 }
 
-void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
+static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
 {
 	clk_disable_unprepare(pdata->clk);
 }
@@ -627,10 +627,10 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
 
 	if (phydev->link) {
 		if (pdata->phy_speed != phydev->speed) {
-			xgene_gmac_init(pdata, phydev->speed);
+			pdata->phy_speed = phydev->speed;
+			xgene_gmac_init(pdata);
 			xgene_gmac_rx_enable(pdata);
 			xgene_gmac_tx_enable(pdata);
-			pdata->phy_speed = phydev->speed;
 			phy_print_status(phydev);
 		}
 	} else {
@@ -726,3 +726,19 @@ void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
 	mdiobus_free(pdata->mdio_bus);
 	pdata->mdio_bus = NULL;
 }
+
+struct xgene_mac_ops xgene_gmac_ops = {
+	.init = xgene_gmac_init,
+	.reset = xgene_gmac_reset,
+	.rx_enable = xgene_gmac_rx_enable,
+	.tx_enable = xgene_gmac_tx_enable,
+	.rx_disable = xgene_gmac_rx_disable,
+	.tx_disable = xgene_gmac_tx_disable,
+	.set_mac_addr = xgene_gmac_set_mac_addr,
+};
+
+struct xgene_port_ops xgene_gport_ops = {
+	.reset = xgene_enet_reset,
+	.cle_bypass = xgene_enet_cle_bypass,
+	.shutdown = xgene_gport_shutdown,
+};

+ 8 - 22
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h

@@ -42,6 +42,11 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
 	return (val & GENMASK(end, start)) >> start;
 }
 
+enum xgene_enet_rm {
+	RM0,
+	RM3 = 3
+};
+
 #define CSR_RING_ID		0x0008
 #define OVERWRITE		BIT(31)
 #define IS_BUFFER_POOL		BIT(20)
@@ -52,7 +57,6 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
 #define CSR_RING_WR_BASE	0x0070
 #define NUM_RING_CONFIG		5
 #define BUFPOOL_MODE		3
-#define RM3			3
 #define INC_DEC_CMD_ADDR	0x002c
 #define UDP_HDR_SIZE		2
 #define BUF_LEN_CODE_2K		0x5000
@@ -94,11 +98,9 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
 
 #define BLOCK_ETH_CSR_OFFSET		0x2000
 #define BLOCK_ETH_RING_IF_OFFSET	0x9000
-#define BLOCK_ETH_CLKRST_CSR_OFFSET	0xC000
 #define BLOCK_ETH_DIAG_CSR_OFFSET	0xD000
 
 #define BLOCK_ETH_MAC_OFFSET		0x0000
-#define BLOCK_ETH_STATS_OFFSET		0x0014
 #define BLOCK_ETH_MAC_CSR_OFFSET	0x2800
 
 #define MAC_ADDR_REG_OFFSET		0x00
@@ -107,12 +109,6 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
 #define MAC_READ_REG_OFFSET		0x0c
 #define MAC_COMMAND_DONE_REG_OFFSET	0x10
 
-#define STAT_ADDR_REG_OFFSET		0x00
-#define STAT_COMMAND_REG_OFFSET		0x04
-#define STAT_WRITE_REG_OFFSET		0x08
-#define STAT_READ_REG_OFFSET		0x0c
-#define STAT_COMMAND_DONE_REG_OFFSET	0x10
-
 #define MII_MGMT_CONFIG_ADDR		0x20
 #define MII_MGMT_COMMAND_ADDR		0x24
 #define MII_MGMT_ADDRESS_ADDR		0x28
@@ -318,20 +314,10 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 			    struct xgene_enet_pdata *pdata,
 			    enum xgene_enet_err_code status);
 
-void xgene_enet_reset(struct xgene_enet_pdata *priv);
-void xgene_gmac_reset(struct xgene_enet_pdata *priv);
-void xgene_gmac_init(struct xgene_enet_pdata *priv, int speed);
-void xgene_gmac_tx_enable(struct xgene_enet_pdata *priv);
-void xgene_gmac_rx_enable(struct xgene_enet_pdata *priv);
-void xgene_gmac_tx_disable(struct xgene_enet_pdata *priv);
-void xgene_gmac_rx_disable(struct xgene_enet_pdata *priv);
-void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata);
-void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
-			   u32 dst_ring_num, u16 bufpool_id);
-void xgene_gport_shutdown(struct xgene_enet_pdata *priv);
-void xgene_gmac_get_tx_stats(struct xgene_enet_pdata *pdata);
-
 int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
 
+extern struct xgene_mac_ops xgene_gmac_ops;
+extern struct xgene_port_ops xgene_gport_ops;
+
 #endif /* __XGENE_ENET_HW_H__ */

+ 61 - 25
drivers/net/ethernet/apm/xgene/xgene_enet_main.c

@@ -21,6 +21,7 @@
 
 #include "xgene_enet_main.h"
 #include "xgene_enet_hw.h"
+#include "xgene_enet_xgmac.h"
 
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
@@ -390,7 +391,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 		}
 	}
 
-	return budget;
+	return count;
 }
 
 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
@@ -413,7 +414,7 @@ static void xgene_enet_timeout(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 
-	xgene_gmac_reset(pdata);
+	pdata->mac_ops->reset(pdata);
 }
 
 static int xgene_enet_register_irq(struct net_device *ndev)
@@ -445,18 +446,21 @@ static void xgene_enet_free_irq(struct net_device *ndev)
 static int xgene_enet_open(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
 	int ret;
 
-	xgene_gmac_tx_enable(pdata);
-	xgene_gmac_rx_enable(pdata);
+	mac_ops->tx_enable(pdata);
+	mac_ops->rx_enable(pdata);
 
 	ret = xgene_enet_register_irq(ndev);
 	if (ret)
 		return ret;
 	napi_enable(&pdata->rx_ring->napi);
 
-	if (pdata->phy_dev)
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
 		phy_start(pdata->phy_dev);
+	else
+		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
 
 	netif_start_queue(ndev);
 
@@ -466,18 +470,21 @@ static int xgene_enet_open(struct net_device *ndev)
 static int xgene_enet_close(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
 
 	netif_stop_queue(ndev);
 
-	if (pdata->phy_dev)
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
 		phy_stop(pdata->phy_dev);
+	else
+		cancel_delayed_work_sync(&pdata->link_work);
 
 	napi_disable(&pdata->rx_ring->napi);
 	xgene_enet_free_irq(ndev);
 	xgene_enet_process_ring(pdata->rx_ring, -1);
 
-	xgene_gmac_tx_disable(pdata);
-	xgene_gmac_rx_disable(pdata);
+	mac_ops->tx_disable(pdata);
+	mac_ops->rx_disable(pdata);
 
 	return 0;
 }
@@ -613,7 +620,6 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
 
 	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
 	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
-	pdata->rm = RM3;
 	ring = xgene_enet_setup_ring(ring);
 	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
 		   ring->num, ring->size, ring->id, ring->slots);
@@ -724,7 +730,7 @@ static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
 	ret = eth_mac_addr(ndev, addr);
 	if (ret)
 		return ret;
-	xgene_gmac_set_mac_addr(pdata);
+	pdata->mac_ops->set_mac_addr(pdata);
 
 	return ret;
 }
@@ -803,8 +809,13 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 
 	pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
 	if (pdata->phy_mode < 0) {
-		dev_err(dev, "Incorrect phy-connection-type in DTS\n");
-		return -EINVAL;
+		dev_err(dev, "Unable to get phy-connection-type\n");
+		return pdata->phy_mode;
+	}
+	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
+	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
+		dev_err(dev, "Incorrect phy-connection-type specified\n");
+		return -ENODEV;
 	}
 
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
@@ -819,12 +830,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
 	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
 	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
-	pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
-	pdata->mcx_stats_addr = base_addr + BLOCK_ETH_STATS_OFFSET;
-	pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+		pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
+		pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+		pdata->rm = RM3;
+	} else {
+		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
+		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
+		pdata->rm = RM0;
+	}
 	pdata->rx_buff_cnt = NUM_PKT_BUF;
 
-	return ret;
+	return 0;
 }
 
 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
@@ -834,8 +851,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	u16 dst_ring_num;
 	int ret;
 
-	xgene_gmac_tx_disable(pdata);
-	xgene_gmac_rx_disable(pdata);
+	pdata->port_ops->reset(pdata);
 
 	ret = xgene_enet_create_desc_rings(ndev);
 	if (ret) {
@@ -853,11 +869,26 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	}
 
 	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
-	xgene_enet_cle_bypass(pdata, dst_ring_num, buf_pool->id);
+	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
+	pdata->mac_ops->init(pdata);
 
 	return ret;
 }
 
+static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
+{
+	switch (pdata->phy_mode) {
+	case PHY_INTERFACE_MODE_RGMII:
+		pdata->mac_ops = &xgene_gmac_ops;
+		pdata->port_ops = &xgene_gport_ops;
+		break;
+	default:
+		pdata->mac_ops = &xgene_xgmac_ops;
+		pdata->port_ops = &xgene_xgport_ops;
+		break;
+	}
+}
+
 static int xgene_enet_probe(struct platform_device *pdev)
 {
 	struct net_device *ndev;
@@ -886,8 +917,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;
 
-	xgene_enet_reset(pdata);
-	xgene_gmac_init(pdata, SPEED_1000);
+	xgene_enet_setup_ops(pdata);
 
 	ret = register_netdev(ndev);
 	if (ret) {
@@ -907,7 +937,10 @@
 
 	napi = &pdata->rx_ring->napi;
 	netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
-	ret = xgene_enet_mdio_config(pdata);
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+		ret = xgene_enet_mdio_config(pdata);
+	else
+		INIT_DELAYED_WORK(&pdata->link_work, xgene_enet_link_state);
 
 	return ret;
 err:
@@ -918,19 +951,21 @@ err:
 static int xgene_enet_remove(struct platform_device *pdev)
 {
 	struct xgene_enet_pdata *pdata;
+	struct xgene_mac_ops *mac_ops;
 	struct net_device *ndev;
 
 	pdata = platform_get_drvdata(pdev);
+	mac_ops = pdata->mac_ops;
 	ndev = pdata->ndev;
 
-	xgene_gmac_rx_disable(pdata);
-	xgene_gmac_tx_disable(pdata);
+	mac_ops->rx_disable(pdata);
+	mac_ops->tx_disable(pdata);
 
 	netif_napi_del(&pdata->rx_ring->napi);
 	xgene_enet_mdio_remove(pdata);
 	xgene_enet_delete_desc_rings(pdata);
 	unregister_netdev(ndev);
-	xgene_gport_shutdown(pdata);
+	pdata->port_ops->shutdown(pdata);
 	free_netdev(ndev);
 
 	return 0;
@@ -956,5 +991,6 @@ module_platform_driver(xgene_enet_driver);
 
 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
 MODULE_VERSION(XGENE_DRV_VERSION);
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
 MODULE_LICENSE("GPL");

+ 21 - 3
drivers/net/ethernet/apm/xgene/xgene_enet_main.h

@@ -68,6 +68,23 @@ struct xgene_enet_desc_ring {
 	};
 };
 
+struct xgene_mac_ops {
+	void (*init)(struct xgene_enet_pdata *pdata);
+	void (*reset)(struct xgene_enet_pdata *pdata);
+	void (*tx_enable)(struct xgene_enet_pdata *pdata);
+	void (*rx_enable)(struct xgene_enet_pdata *pdata);
+	void (*tx_disable)(struct xgene_enet_pdata *pdata);
+	void (*rx_disable)(struct xgene_enet_pdata *pdata);
+	void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+};
+
+struct xgene_port_ops {
+	void (*reset)(struct xgene_enet_pdata *pdata);
+	void (*cle_bypass)(struct xgene_enet_pdata *pdata,
+			   u32 dst_ring_num, u16 bufpool_id);
+	void (*shutdown)(struct xgene_enet_pdata *pdata);
+};
+
 /* ethernet private data */
 struct xgene_enet_pdata {
 	struct net_device *ndev;
@@ -88,16 +105,17 @@ struct xgene_enet_pdata {
 	void __iomem *eth_ring_if_addr;
 	void __iomem *eth_diag_csr_addr;
 	void __iomem *mcx_mac_addr;
-	void __iomem *mcx_stats_addr;
 	void __iomem *mcx_mac_csr_addr;
 	void __iomem *base_addr;
 	void __iomem *ring_csr_addr;
 	void __iomem *ring_cmd_addr;
 	u32 phy_addr;
 	int phy_mode;
-	u32 speed;
-	u16 rm;
+	enum xgene_enet_rm rm;
 	struct rtnl_link_stats64 stats;
+	struct xgene_mac_ops *mac_ops;
+	struct xgene_port_ops *port_ops;
+	struct delayed_work link_work;
 };
 
 /* Set the specified value into a bit-field defined by its starting position

+ 331 - 0
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c

@@ -0,0 +1,331 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_xgmac.h"
+
+static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_ring_if_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 val)
+{
+	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
+				   void __iomem *cmd, void __iomem *cmd_done,
+				   u32 wr_addr, u32 wr_data)
+{
+	u32 done;
+	u8 wait = 10;
+
+	iowrite32(wr_addr, addr);
+	iowrite32(wr_data, wr);
+	iowrite32(XGENE_ENET_WR_CMD, cmd);
+
+	/* wait for write command to complete */
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		return false;
+
+	iowrite32(0, cmd);
+
+	return true;
+}
+
+static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
+			      u32 wr_addr, u32 wr_data)
+{
+	void __iomem *addr, *wr, *cmd, *cmd_done;
+
+	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
+	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
+		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
+			   wr_addr);
+}
+
+static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->eth_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 *val)
+{
+	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
+				   void __iomem *cmd, void __iomem *cmd_done,
+				   u32 rd_addr, u32 *rd_data)
+{
+	u32 done;
+	u8 wait = 10;
+
+	iowrite32(rd_addr, addr);
+	iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+	/* wait for read command to complete */
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		return false;
+
+	*rd_data = ioread32(rd);
+	iowrite32(0, cmd);
+
+	return true;
+}
+
+static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
+			      u32 rd_addr, u32 *rd_data)
+{
+	void __iomem *addr, *rd, *cmd, *cmd_done;
+
+	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
+	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
+		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
+			   rd_addr);
+}
+
+static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	u32 data;
+	u8 wait = 10;
+
+	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
+	do {
+		usleep_range(100, 110);
+		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
+	} while ((data != 0xffffffff) && wait--);
+
+	if (data != 0xffffffff) {
+		netdev_err(ndev, "Failed to release memory from shutdown\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0);
+}
+
+static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
+}
+
+static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
+{
+	u32 addr0, addr1;
+	u8 *dev_addr = pdata->ndev->dev_addr;
+
+	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+		(dev_addr[1] << 8) | dev_addr[0];
+	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+	xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0);
+	xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
+}
+
+static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data);
+
+	return data;
+}
+
+static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_xgmac_reset(pdata);
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	data |= HSTPPEN;
+	data &= ~HSTLENCHK;
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+
+	xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600);
+	xgene_xgmac_set_mac_addr(pdata);
+
+	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
+	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
+
+	xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
+	xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
+	xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
+	data |= BIT(12);
+	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
+	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
+}
+
+static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
+}
+
+static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
+}
+
+static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
+}
+
+static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
+}
+
+static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+{
+	clk_prepare_enable(pdata->clk);
+	clk_disable_unprepare(pdata->clk);
+	clk_prepare_enable(pdata->clk);
+
+	xgene_enet_ecc_init(pdata);
+	xgene_enet_config_ring_if_assoc(pdata);
+}
+
+static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
+				    u32 dst_ring_num, u16 bufpool_id)
+{
+	u32 cb, fpsel;
+
+	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
+	cb |= CFG_CLE_BYPASS_EN0;
+	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);
+
+	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
+	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
+	CFG_CLE_FPSEL0_SET(&cb, fpsel);
+	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
+}
+
+static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
+{
+	clk_disable_unprepare(pdata->clk);
+}
+
+void xgene_enet_link_state(struct work_struct *work)
+{
+	struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
+					 struct xgene_enet_pdata, link_work);
+	struct net_device *ndev = pdata->ndev;
+	u32 link_status, poll_interval;
+
+	link_status = xgene_enet_link_status(pdata);
+	if (link_status) {
+		if (!netif_carrier_ok(ndev)) {
+			netif_carrier_on(ndev);
+			xgene_xgmac_init(pdata);
+			xgene_xgmac_rx_enable(pdata);
+			xgene_xgmac_tx_enable(pdata);
+			netdev_info(ndev, "Link is Up - 10Gbps\n");
+		}
+		poll_interval = PHY_POLL_LINK_ON;
+	} else {
+		if (netif_carrier_ok(ndev)) {
+			xgene_xgmac_rx_disable(pdata);
+			xgene_xgmac_tx_disable(pdata);
+			netif_carrier_off(ndev);
+			netdev_info(ndev, "Link is Down\n");
+		}
+		poll_interval = PHY_POLL_LINK_OFF;
+	}
+
+	schedule_delayed_work(&pdata->link_work, poll_interval);
+}
+
+struct xgene_mac_ops xgene_xgmac_ops = {
+	.init = xgene_xgmac_init,
+	.reset = xgene_xgmac_reset,
+	.rx_enable = xgene_xgmac_rx_enable,
+	.tx_enable = xgene_xgmac_tx_enable,
+	.rx_disable = xgene_xgmac_rx_disable,
+	.tx_disable = xgene_xgmac_tx_disable,
+	.set_mac_addr = xgene_xgmac_set_mac_addr,
+};
+
+struct xgene_port_ops xgene_xgport_ops = {
+	.reset = xgene_enet_reset,
+	.cle_bypass = xgene_enet_xgcle_bypass,
+	.shutdown = xgene_enet_shutdown,
+};

+ 57 - 0
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h

@@ -0,0 +1,57 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_XGMAC_H__
+#define __XGENE_ENET_XGMAC_H__
+
+#define BLOCK_AXG_MAC_OFFSET		0x0800
+#define BLOCK_AXG_MAC_CSR_OFFSET	0x2000
+
+#define AXGMAC_CONFIG_0			0x0000
+#define AXGMAC_CONFIG_1			0x0004
+#define HSTMACRST			BIT(31)
+#define HSTTCTLEN			BIT(31)
+#define HSTTFEN				BIT(30)
+#define HSTRCTLEN			BIT(29)
+#define HSTRFEN				BIT(28)
+#define HSTPPEN				BIT(7)
+#define HSTDRPLT64			BIT(5)
+#define HSTLENCHK			BIT(3)
+#define HSTMACADR_LSW_ADDR		0x0010
+#define HSTMACADR_MSW_ADDR		0x0014
+#define HSTMAXFRAME_LENGTH_ADDR		0x0020
+
+#define XG_RSIF_CONFIG_REG_ADDR		0x00a0
+#define XCLE_BYPASS_REG0_ADDR           0x0160
+#define XCLE_BYPASS_REG1_ADDR           0x0164
+#define XG_CFG_BYPASS_ADDR		0x0204
+#define XG_LINK_STATUS_ADDR		0x0228
+#define XG_ENET_SPARE_CFG_REG_ADDR	0x040c
+#define XG_ENET_SPARE_CFG_REG_1_ADDR	0x0410
+#define XGENET_RX_DV_GATE_REG_0_ADDR	0x0804
+
+#define PHY_POLL_LINK_ON	(10 * HZ)
+#define PHY_POLL_LINK_OFF	(PHY_POLL_LINK_ON / 5)
+
+void xgene_enet_link_state(struct work_struct *work);
+extern struct xgene_mac_ops xgene_xgmac_ops;
+extern struct xgene_port_ops xgene_xgport_ops;
+
+#endif /* __XGENE_ENET_XGMAC_H__ */

+ 2 - 1
drivers/net/ethernet/broadcom/bcmsysport.c

@@ -436,7 +436,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
 	/* Flag the device and relevant IRQ as wakeup capable */
 	if (wol->wolopts) {
 		device_set_wakeup_enable(kdev, 1);
-		enable_irq_wake(priv->wol_irq);
+		if (priv->wol_irq_disabled)
+			enable_irq_wake(priv->wol_irq);
 		priv->wol_irq_disabled = 0;
 	} else {
 		device_set_wakeup_enable(kdev, 0);

+ 4 - 5
drivers/net/ethernet/broadcom/genet/bcmgenet.c

@@ -1285,11 +1285,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 		cb = &priv->rx_cbs[priv->rx_read_ptr];
 		skb = cb->skb;
 
-		rxpktprocessed++;
-
-		priv->rx_read_ptr++;
-		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
 		/* We do not have a backing SKB, so we do not have a
 		 * corresponding DMA mapping for this incoming packet since
 		 * bcmgenet_rx_refill always either has both skb and mapping or
@@ -1404,6 +1399,10 @@ refill:
 		err = bcmgenet_rx_refill(priv, cb);
 		if (err)
 			netif_err(priv, rx_err, dev, "Rx refill failed\n");
+
+		rxpktprocessed++;
+		priv->rx_read_ptr++;
+		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
 	}
 
 	return rxpktprocessed;
+ 3 - 1
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c

@@ -86,7 +86,9 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	/* Flag the device and relevant IRQ as wakeup capable */
 	if (wol->wolopts) {
 		device_set_wakeup_enable(kdev, 1);
-		enable_irq_wake(priv->wol_irq);
+		/* Avoid unbalanced enable_irq_wake calls */
+		if (priv->wol_irq_disabled)
+			enable_irq_wake(priv->wol_irq);
 		priv->wol_irq_disabled = false;
 	} else {
 		device_set_wakeup_enable(kdev, 0);

+ 1 - 1
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h

@@ -968,7 +968,7 @@ void t4_intr_enable(struct adapter *adapter);
 void t4_intr_disable(struct adapter *adapter);
 int t4_slow_intr_handler(struct adapter *adapter);
 
-int t4_wait_dev_ready(struct adapter *adap);
+int t4_wait_dev_ready(void __iomem *regs);
 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
 		  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);

+ 5 - 1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c

@@ -6137,7 +6137,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 	pci_save_state(pdev);
 	pci_cleanup_aer_uncorrect_error_status(pdev);
 
-	if (t4_wait_dev_ready(adap) < 0)
+	if (t4_wait_dev_ready(adap->regs) < 0)
 		return PCI_ERS_RESULT_DISCONNECT;
 	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
 		return PCI_ERS_RESULT_DISCONNECT;
@@ -6530,6 +6530,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_disable_device;
 	}
 
+	err = t4_wait_dev_ready(regs);
+	if (err < 0)
+		goto out_unmap_bar0;
+
 	/* We control everything through one PF */
 	func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
 	if (func != ent->driver_data) {

+ 4 - 1
drivers/net/ethernet/chelsio/cxgb4/sge.c

@@ -1123,7 +1123,10 @@ out_free:	dev_kfree_skb_any(skb);
 		lso->c.ipid_ofst = htons(0);
 		lso->c.mss = htons(ssi->gso_size);
 		lso->c.seqno_offset = htonl(0);
-		lso->c.len = htonl(skb->len);
+		if (is_t4(adap->params.chip))
+			lso->c.len = htonl(skb->len);
+		else
+			lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
 		cpl = (void *)(lso + 1);
 		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
 			TXPKT_IPHDR_LEN(l3hdr_len) |

+ 10 - 7
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c

@@ -3845,12 +3845,19 @@ static void init_link_config(struct link_config *lc, unsigned int caps)
 	}
 }
 
-int t4_wait_dev_ready(struct adapter *adap)
+#define CIM_PF_NOACCESS 0xeeeeeeee
+
+int t4_wait_dev_ready(void __iomem *regs)
 {
-	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
+	u32 whoami;
+
+	whoami = readl(regs + PL_WHOAMI);
+	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
 		return 0;
+
 	msleep(500);
-	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
+	whoami = readl(regs + PL_WHOAMI);
+	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
 }
 
 struct flash_desc {
@@ -3919,10 +3926,6 @@ int t4_prep_adapter(struct adapter *adapter)
 	uint16_t device_id;
 	u32 pl_rev;
 
-	ret = t4_wait_dev_ready(adapter);
-	if (ret < 0)
-		return ret;
-
 	get_pci_mode(adapter, &adapter->params.pci);
 	pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
 

+ 1 - 0
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h

@@ -527,6 +527,7 @@ struct cpl_tx_pkt_lso_core {
 #define LSO_LAST_SLICE    (1 << 22)
 #define LSO_FIRST_SLICE   (1 << 23)
 #define LSO_OPCODE(x)     ((x) << 24)
+#define LSO_T5_XFER_SIZE(x) ((x) << 0)
 	__be16 ipid_ofst;
 	__be16 mss;
 	__be32 seqno_offset;

+ 2 - 3
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h

@@ -72,9 +72,8 @@
 #define  PIDX_MASK   0x00003fffU
 #define  PIDX_SHIFT  0
 #define  PIDX(x)     ((x) << PIDX_SHIFT)
-#define  S_PIDX_T5   0
-#define  M_PIDX_T5   0x1fffU
-#define  PIDX_T5(x)  (((x) >> S_PIDX_T5) & M_PIDX_T5)
+#define  PIDX_SHIFT_T5   0
+#define  PIDX_T5(x)  ((x) << PIDX_SHIFT_T5)
 
 
 #define SGE_TIMERREGS	6

+ 8 - 4
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c

@@ -163,15 +163,19 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
 		netif_carrier_on(dev);
 
 		switch (pi->link_cfg.speed) {
-		case SPEED_10000:
+		case 40000:
+			s = "40Gbps";
+			break;
+
+		case 10000:
 			s = "10Gbps";
 			break;
 
-		case SPEED_1000:
+		case 1000:
 			s = "1000Mbps";
 			break;
 
-		case SPEED_100:
+		case 100:
 			s = "100Mbps";
 			break;
 
@@ -2351,7 +2355,7 @@ static void cfg_queues(struct adapter *adapter)
 		struct port_info *pi = adap2pinfo(adapter, pidx);
 
 		pi->first_qset = qidx;
-		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
 		qidx += pi->nqsets;
 	}
 	s->ethqsets = qidx;

+ 4 - 1
drivers/net/ethernet/chelsio/cxgb4vf/sge.c

@@ -1208,7 +1208,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		lso->ipid_ofst = cpu_to_be16(0);
 		lso->mss = cpu_to_be16(ssi->gso_size);
 		lso->seqno_offset = cpu_to_be32(0);
-		lso->len = cpu_to_be32(skb->len);
+		if (is_t4(adapter->params.chip))
+			lso->len = cpu_to_be32(skb->len);
+		else
+			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));
 
 		/*
 		 * Set up TX Packet CPL pointer, control word and perform

+ 6 - 0
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h

@@ -228,6 +228,12 @@ static inline bool is_10g_port(const struct link_config *lc)
 	return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
 }
 
+static inline bool is_x_10g_port(const struct link_config *lc)
+{
+	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+		(lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+}
+
 static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
 {
 	return adapter->params.vpd.cclk / 1000;

+ 7 - 3
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c

@@ -327,6 +327,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
 		v |= SUPPORTED_1000baseT_Full;
 	if (word & FW_PORT_CAP_SPEED_10G)
 		v |= SUPPORTED_10000baseT_Full;
+	if (word & FW_PORT_CAP_SPEED_40G)
+		v |= SUPPORTED_40000baseSR4_Full;
 	if (word & FW_PORT_CAP_ANEG)
 		v |= SUPPORTED_Autoneg;
 	init_link_config(&pi->link_cfg, v);
@@ -1352,11 +1354,13 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
 		if (word & FW_PORT_CMD_TXPAUSE)
 			fc |= PAUSE_TX;
 		if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
-			speed = SPEED_100;
+			speed = 100;
 		else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
-			speed = SPEED_1000;
+			speed = 1000;
 		else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
-			speed = SPEED_10000;
+			speed = 10000;
+		else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+			speed = 40000;
 
 
 		/*
 		 * Scan all of our "ports" (Virtual Interfaces) looking for

+ 1 - 1
drivers/net/ethernet/freescale/fs_enet/mac-fcc.c

@@ -125,7 +125,7 @@ out:
 }
 
 #define FCC_NAPI_RX_EVENT_MSK	(FCC_ENET_RXF | FCC_ENET_RXB)
-#define FCC_NAPI_TX_EVENT_MSK	(FCC_ENET_TXF | FCC_ENET_TXB)
+#define FCC_NAPI_TX_EVENT_MSK	(FCC_ENET_TXB)
 #define FCC_RX_EVENT		(FCC_ENET_RXF)
 #define FCC_TX_EVENT		(FCC_ENET_TXB)
 #define FCC_ERR_EVENT_MSK	(FCC_ENET_TXE)

+ 1 - 1
drivers/net/ethernet/freescale/fs_enet/mac-scc.c

@@ -116,7 +116,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
 }
 
 #define SCC_NAPI_RX_EVENT_MSK	(SCCE_ENET_RXF | SCCE_ENET_RXB)
-#define SCC_NAPI_TX_EVENT_MSK	(SCCE_ENET_TXF | SCCE_ENET_TXB)
+#define SCC_NAPI_TX_EVENT_MSK	(SCCE_ENET_TXB)
 #define SCC_RX_EVENT		(SCCE_ENET_RXF)
 #define SCC_TX_EVENT		(SCCE_ENET_TXB)
 #define SCC_ERR_EVENT_MSK	(SCCE_ENET_TXE | SCCE_ENET_BSY)

+ 34 - 22
drivers/net/ethernet/freescale/fsl_pq_mdio.c

@@ -28,7 +28,9 @@
 #include <linux/of_device.h>
 
 #include <asm/io.h>
+#if IS_ENABLED(CONFIG_UCC_GETH)
 #include <asm/ucc.h>	/* for ucc_set_qe_mux_mii_mng() */
+#endif
 
 
 #include "gianfar.h"
 
@@ -102,19 +104,22 @@ static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 {
 	struct fsl_pq_mdio_priv *priv = bus->priv;
 	struct fsl_pq_mii __iomem *regs = priv->regs;
-	u32 status;
+	unsigned int timeout;
 
 
 	/* Set the PHY address and the register address we want to write */
-	out_be32(&regs->miimadd, (mii_id << 8) | regnum);
+	iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
 
 
 	/* Write out the value we want */
-	out_be32(&regs->miimcon, value);
+	iowrite32be(value, &regs->miimcon);
 
 
 	/* Wait for the transaction to finish */
-	status = spin_event_timeout(!(in_be32(&regs->miimind) &	MIIMIND_BUSY),
-				    MII_TIMEOUT, 0);
+	timeout = MII_TIMEOUT;
+	while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+		cpu_relax();
+		timeout--;
+	}
 
 
-	return status ? 0 : -ETIMEDOUT;
+	return timeout ? 0 : -ETIMEDOUT;
 }
 
 /*
@@ -131,25 +136,29 @@ static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
 	struct fsl_pq_mdio_priv *priv = bus->priv;
 	struct fsl_pq_mii __iomem *regs = priv->regs;
-	u32 status;
+	unsigned int timeout;
 	u16 value;
 
 	/* Set the PHY address and the register address we want to read */
-	out_be32(&regs->miimadd, (mii_id << 8) | regnum);
+	iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
 
 
 	/* Clear miimcom, and then initiate a read */
-	out_be32(&regs->miimcom, 0);
-	out_be32(&regs->miimcom, MII_READ_COMMAND);
+	iowrite32be(0, &regs->miimcom);
+	iowrite32be(MII_READ_COMMAND, &regs->miimcom);
 
 
 	/* Wait for the transaction to finish, normally less than 100us */
-	status = spin_event_timeout(!(in_be32(&regs->miimind) &
-				    (MIIMIND_NOTVALID | MIIMIND_BUSY)),
-				    MII_TIMEOUT, 0);
-	if (!status)
+	timeout = MII_TIMEOUT;
+	while ((ioread32be(&regs->miimind) &
+	       (MIIMIND_NOTVALID | MIIMIND_BUSY)) && timeout) {
+		cpu_relax();
+		timeout--;
+	}
+
+	if (!timeout)
 		return -ETIMEDOUT;
 
 	/* Grab the value of the register from miimstat */
-	value = in_be32(&regs->miimstat);
+	value = ioread32be(&regs->miimstat);
 
 
 	dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
 	return value;
@@ -160,23 +169,26 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
 {
 	struct fsl_pq_mdio_priv *priv = bus->priv;
 	struct fsl_pq_mii __iomem *regs = priv->regs;
-	u32 status;
+	unsigned int timeout;
 
 
 	mutex_lock(&bus->mdio_lock);
 
 	/* Reset the management interface */
-	out_be32(&regs->miimcfg, MIIMCFG_RESET);
+	iowrite32be(MIIMCFG_RESET, &regs->miimcfg);
 
 
 	/* Setup the MII Mgmt clock speed */
-	out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
+	iowrite32be(MIIMCFG_INIT_VALUE, &regs->miimcfg);
 
 
 	/* Wait until the bus is free */
-	status = spin_event_timeout(!(in_be32(&regs->miimind) &	MIIMIND_BUSY),
-				    MII_TIMEOUT, 0);
+	timeout = MII_TIMEOUT;
+	while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+		cpu_relax();
+		timeout--;
+	}
 
 
 	mutex_unlock(&bus->mdio_lock);
 
-	if (!status) {
+	if (!timeout) {
 		dev_err(&bus->dev, "timeout waiting for MII bus\n");
 		return -EBUSY;
 	}
@@ -433,7 +445,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 
 
 			tbipa = data->get_tbipa(priv->map);
 
-			out_be32(tbipa, be32_to_cpup(prop));
+			iowrite32be(be32_to_cpup(prop), tbipa);
 		}
 	}
 

+ 37 - 31
drivers/net/ethernet/freescale/gianfar.c

@@ -88,8 +88,10 @@
 #include <linux/net_tstamp.h>
 
 #include <asm/io.h>
+#ifdef CONFIG_PPC
 #include <asm/reg.h>
 #include <asm/mpc85xx.h>
+#endif
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
@@ -100,6 +102,8 @@
 #include <linux/phy_fixed.h>
 #include <linux/of.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 
 #include "gianfar.h"
 
@@ -161,7 +165,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 		lstatus |= BD_LFLAG(RXBD_WRAP);
 
-	eieio();
+	gfar_wmb();
 
 
 	bdp->lstatus = lstatus;
 }
@@ -1061,6 +1065,7 @@ static void gfar_init_filer_table(struct gfar_private *priv)
 	}
 }
 
+#ifdef CONFIG_PPC
 static void __gfar_detect_errata_83xx(struct gfar_private *priv)
 {
 	unsigned int pvr = mfspr(SPRN_PVR);
@@ -1093,6 +1098,7 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
 	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
 		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
 }
+#endif
 
 
 static void gfar_detect_errata(struct gfar_private *priv)
 {
@@ -1101,10 +1107,12 @@ static void gfar_detect_errata(struct gfar_private *priv)
 	/* no plans to fix */
 	priv->errata |= GFAR_ERRATA_A002;
 
+#ifdef CONFIG_PPC
 	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
 		__gfar_detect_errata_85xx(priv);
 	else /* non-mpc85xx parts, i.e. e300 core based */
 		__gfar_detect_errata_83xx(priv);
+#endif
 
 
 	if (priv->errata)
 		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1754,26 +1762,32 @@ static void gfar_halt_nodisable(struct gfar_private *priv)
 {
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
+	unsigned int timeout;
+	int stopped;
 
 
 	gfar_ints_disable(priv);
 
+	if (gfar_is_dma_stopped(priv))
+		return;
+
 	/* Stop the DMA, and wait for it to stop */
 	tempval = gfar_read(&regs->dmactrl);
-	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
-	    (DMACTRL_GRS | DMACTRL_GTS)) {
-		int ret;
-
-		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
-		gfar_write(&regs->dmactrl, tempval);
+	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+	gfar_write(&regs->dmactrl, tempval);
 
 
-		do {
-			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
-				 (IEVENT_GRSC | IEVENT_GTSC)) ==
-				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
-			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
-				ret = __gfar_is_rx_idle(priv);
-		} while (!ret);
+retry:
+	timeout = 1000;
+	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
+		cpu_relax();
+		timeout--;
 	}
+
+	if (!timeout)
+		stopped = gfar_is_dma_stopped(priv);
+
+	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
+	    !__gfar_is_rx_idle(priv))
+		goto retry;
 }
 
 /* Halt the receive and transmit queues */
@@ -2357,18 +2371,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	spin_lock_irqsave(&tx_queue->txlock, flags);
 
-	/* The powerpc-specific eieio() is used, as wmb() has too strong
-	 * semantics (it requires synchronization between cacheable and
-	 * uncacheable mappings, which eieio doesn't provide and which we
-	 * don't need), thus requiring a more expensive sync instruction.  At
-	 * some point, the set of architecture-independent barrier functions
-	 * should be expanded to include weaker barriers.
-	 */
-	eieio();
+	gfar_wmb();
 
 
 	txbdp_start->lstatus = lstatus;
 
-	eieio(); /* force lstatus write before tx_skbuff */
+	gfar_wmb(); /* force lstatus write before tx_skbuff */
 
 
 	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
 
@@ -3240,22 +3247,21 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	int idx;
-	char tmpbuf[ETH_ALEN];
 	u32 tempval;
 	u32 __iomem *macptr = &regs->macstnaddr1;
 
 	macptr += num*2;
 
-	/* Now copy it into the mac registers backwards, cuz
-	 * little endian is silly
+	/* For a station address of 0x12345678ABCD in transmission
+	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
+	 * MACnADDR2 is set to 0x34120000.
 	 */
-	for (idx = 0; idx < ETH_ALEN; idx++)
-		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
+	tempval = (addr[5] << 24) | (addr[4] << 16) |
+		  (addr[3] << 8)  |  addr[2];
 
 
-	gfar_write(macptr, *((u32 *) (tmpbuf)));
+	gfar_write(macptr, tempval);
 
 
-	tempval = *((u32 *) (tmpbuf + 4));
+	tempval = (addr[1] << 24) | (addr[0] << 16);
 
 
 	gfar_write(macptr+1, tempval);
 }
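
As a quick sanity check of the register packing in the reworked gfar_set_mac_for_addr() above, the example address from the new comment can be reproduced with a small standalone C sketch (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Station address 12:34:56:78:AB:CD in transmission order (BE). */
	uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };
	uint32_t macstnaddr1, macstnaddr2;

	/* Same byte packing as the patched gfar_set_mac_for_addr(). */
	macstnaddr1 = ((uint32_t)addr[5] << 24) | ((uint32_t)addr[4] << 16) |
		      ((uint32_t)addr[3] << 8)  |  (uint32_t)addr[2];
	macstnaddr2 = ((uint32_t)addr[1] << 24) | ((uint32_t)addr[0] << 16);

	printf("MACnADDR1 = 0x%08X\n", (unsigned int)macstnaddr1); /* 0xCDAB7856 */
	printf("MACnADDR2 = 0x%08X\n", (unsigned int)macstnaddr2); /* 0x34120000 */
	return 0;
}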

+ 31 - 0
drivers/net/ethernet/freescale/gianfar.h

@@ -1226,6 +1226,37 @@ static inline void gfar_write_isrg(struct gfar_private *priv)
 	}
 }
 
+static inline int gfar_is_dma_stopped(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+	return ((gfar_read(&regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)) ==
+	       (IEVENT_GRSC | IEVENT_GTSC));
+}
+
+static inline int gfar_is_rx_dma_stopped(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+	return gfar_read(&regs->ievent) & IEVENT_GRSC;
+}
+
+static inline void gfar_wmb(void)
+{
+#if defined(CONFIG_PPC)
+	/* The powerpc-specific eieio() is used, as wmb() has too strong
+	 * semantics (it requires synchronization between cacheable and
+	 * uncacheable mappings, which eieio() doesn't provide and which we
+	 * don't need), thus requiring a more expensive sync instruction.  At
+	 * some point, the set of architecture-independent barrier functions
+	 * should be expanded to include weaker barriers.
+	 */
+	eieio();
+#else
+	wmb(); /* order write acesses for BD (or FCB) fields */
+#endif
+}
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);

+ 1 - 0
drivers/net/ethernet/intel/Kconfig

@@ -304,6 +304,7 @@ config FM10K
 	tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
 	default n
 	depends on PCI_MSI
+	select PTP_1588_CLOCK
 	---help---
 	  This driver supports Intel(R) FM10000 Ethernet Switch Host
 	  Interface.  For more information on how to identify your adapter,

+ 3 - 4
drivers/net/ethernet/intel/fm10k/fm10k_main.c

@@ -219,11 +219,10 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
 
-	/* since we are the only owner of the page and we need to
-	 * increment it, just set the value to 2 in order to avoid
-	 * an unnecessary locked operation
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
 	 */
-	atomic_set(&page->_count, 2);
+	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;

+ 3 - 4
drivers/net/ethernet/intel/igb/igb_main.c

@@ -6545,11 +6545,10 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
 
-	/* since we are the only owner of the page and we need to
-	 * increment it, just set the value to 2 in order to avoid
-	 * an unnecessary locked operation
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
 	 */
-	atomic_set(&page->_count, 2);
+	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;

+ 3 - 5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -1865,12 +1865,10 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= truesize;
 
-	/*
-	 * since we are the only owner of the page and we need to
-	 * increment it, just set the value to 2 in order to avoid
-	 * an unecessary locked operation
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
 	 */
-	atomic_set(&page->_count, 2);
+	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;

+ 2 - 1
drivers/net/ethernet/marvell/Kconfig

@@ -64,7 +64,8 @@ config MVPP2
 
 
 config PXA168_ETH
 	tristate "Marvell pxa168 ethernet support"
-	depends on (CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST) && HAS_IOMEM
+	depends on HAS_IOMEM && HAS_DMA
+	depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
 	select PHYLIB
 	---help---
 	  This driver supports the pxa168 Ethernet ports.

+ 3 - 3
drivers/net/ethernet/mellanox/mlx4/en_rx.c

@@ -76,10 +76,10 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 	page_alloc->dma = dma;
 	page_alloc->page_offset = frag_info->frag_align;
 	/* Not doing get_page() for each frag is a big win
-	 * on asymetric workloads.
+	 * on asymetric workloads. Note we can not use atomic_set().
 	 */
-	atomic_set(&page->_count,
-		   page_alloc->page_size / frag_info->frag_stride);
+	atomic_add(page_alloc->page_size / frag_info->frag_stride - 1,
+		   &page->_count);
 	return 0;
 }
 

+ 2 - 2
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c

@@ -134,7 +134,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
 	unsigned int value = 0;
 	unsigned int perfect_addr_number = hw->unicast_filter_entries;
-	u32 mc_filter[2];
+	u32 mc_filter[8];
 	int mcbitslog2 = hw->mcast_bits_log2;
 
 	pr_debug("%s: # mcasts %d, # unicast %d\n", __func__,
@@ -182,7 +182,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
 		struct netdev_hw_addr *ha;
 
 		netdev_for_each_uc_addr(ha, dev) {
-			stmmac_get_mac_addr(ioaddr, ha->addr,
+			stmmac_set_mac_addr(ioaddr, ha->addr,
 					    GMAC_ADDR_HIGH(reg),
 					    GMAC_ADDR_LOW(reg));
 			reg++;

+ 13 - 8
drivers/net/macvlan.c

@@ -260,7 +260,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
 					mode == MACVLAN_MODE_BRIDGE) ?:
 				      netif_rx_ni(nskb);
 			macvlan_count_rx(vlan, skb->len + ETH_HLEN,
-					 err == NET_RX_SUCCESS, 1);
+					 err == NET_RX_SUCCESS, true);
 		}
 	}
 }
@@ -379,7 +379,7 @@ static void macvlan_forward_source_one(struct sk_buff *skb,
 	nskb->pkt_type = PACKET_HOST;
 
 	ret = netif_rx(nskb);
-	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
+	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
 }
 
 static void macvlan_forward_source(struct sk_buff *skb,
@@ -407,7 +407,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 	const struct macvlan_dev *src;
 	struct net_device *dev;
 	unsigned int len = 0;
-	int ret = NET_RX_DROP;
+	int ret;
+	rx_handler_result_t handle_res;
 
 
 	port = macvlan_port_get_rcu(skb->dev);
 	if (is_multicast_ether_addr(eth->h_dest)) {
@@ -423,6 +424,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 			vlan = src;
 			ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
 			      netif_rx(skb);
+			handle_res = RX_HANDLER_CONSUMED;
 			goto out;
 		}
 
@@ -448,17 +450,20 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 	}
 	len = skb->len + ETH_HLEN;
 	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (!skb)
+	if (!skb) {
+		ret = NET_RX_DROP;
+		handle_res = RX_HANDLER_CONSUMED;
 		goto out;
+	}
 
 
 	skb->dev = dev;
 	skb->pkt_type = PACKET_HOST;
 
 
-	ret = netif_rx(skb);
-
+	ret = NET_RX_SUCCESS;
+	handle_res = RX_HANDLER_ANOTHER;
 out:
-	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
-	return RX_HANDLER_CONSUMED;
+	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
+	return handle_res;
 }
 
 static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)

+ 29 - 2
drivers/net/phy/micrel.c

@@ -26,6 +26,7 @@
 #include <linux/phy.h>
 #include <linux/micrel_phy.h>
 #include <linux/of.h>
+#include <linux/clk.h>
 
 
 /* Operation Mode Strap Override */
 #define MII_KSZPHY_OMSO				0x16
@@ -72,9 +73,12 @@ static int ksz_config_flags(struct phy_device *phydev)
 {
 	int regval;
 
-	if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+	if (phydev->dev_flags & (MICREL_PHY_50MHZ_CLK | MICREL_PHY_25MHZ_CLK)) {
 		regval = phy_read(phydev, MII_KSZPHY_CTRL);
-		regval |= KSZ8051_RMII_50MHZ_CLK;
+		if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK)
+			regval |= KSZ8051_RMII_50MHZ_CLK;
+		else
+			regval &= ~KSZ8051_RMII_50MHZ_CLK;
 		return phy_write(phydev, MII_KSZPHY_CTRL, regval);
 	}
 	return 0;
@@ -440,6 +444,27 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
 {
 }
 
+static int ksz8021_probe(struct phy_device *phydev)
+{
+	struct clk *clk;
+
+	clk = devm_clk_get(&phydev->dev, "rmii-ref");
+	if (!IS_ERR(clk)) {
+		unsigned long rate = clk_get_rate(clk);
+
+		if (rate > 24500000 && rate < 25500000) {
+			phydev->dev_flags |= MICREL_PHY_25MHZ_CLK;
+		} else if (rate > 49500000 && rate < 50500000) {
+			phydev->dev_flags |= MICREL_PHY_50MHZ_CLK;
+		} else {
+			dev_err(&phydev->dev, "Clock rate out of range: %ld\n", rate);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static struct phy_driver ksphy_driver[] = {
 {
 	.phy_id		= PHY_ID_KS8737,
@@ -462,6 +487,7 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
 			   SUPPORTED_Asym_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.probe		= ksz8021_probe,
 	.config_init	= ksz8021_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
@@ -477,6 +503,7 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
 			   SUPPORTED_Asym_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.probe		= ksz8021_probe,
 	.config_init	= ksz8021_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
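
For reference, the clock-rate windows used by the new ksz8021_probe() above can be exercised with a tiny standalone C sketch (illustrative values only, not part of the patch):

#include <stdio.h>

/* Mirrors the probe's windows: 24.5-25.5 MHz selects the 25 MHz RMII
 * reference, 49.5-50.5 MHz selects the 50 MHz one, anything else fails. */
static const char *classify_rmii_ref(unsigned long rate)
{
	if (rate > 24500000 && rate < 25500000)
		return "25 MHz reference (MICREL_PHY_25MHZ_CLK)";
	if (rate > 49500000 && rate < 50500000)
		return "50 MHz reference (MICREL_PHY_50MHZ_CLK)";
	return "out of range (-EINVAL)";
}

int main(void)
{
	unsigned long rates[] = { 25000000, 50000000, 33000000 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%lu Hz -> %s\n", rates[i], classify_rmii_ref(rates[i]));
	return 0;
}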

+ 83 - 15
drivers/net/usb/r8152.c

@@ -26,7 +26,7 @@
 #include <linux/mdio.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.06.1 (2014/10/01)"
+#define DRIVER_VERSION "v1.07.0 (2014/10/09)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
@@ -566,6 +566,7 @@ struct r8152 {
 	spinlock_t rx_lock, tx_lock;
 	struct delayed_work schedule;
 	struct mii_if_info mii;
+	struct mutex control;	/* use for hw setting */
 
 
 	struct rtl_ops {
 		void (*init)(struct r8152 *);
@@ -942,15 +943,8 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
 	if (phy_id != R8152_PHY_ID)
 		return -EINVAL;
 
-	ret = usb_autopm_get_interface(tp->intf);
-	if (ret < 0)
-		goto out;
-
 	ret = r8152_mdio_read(tp, reg);
 
-	usb_autopm_put_interface(tp->intf);
-
-out:
 	return ret;
 }
 
@@ -965,12 +959,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
 	if (phy_id != R8152_PHY_ID)
 		return;
 
-	if (usb_autopm_get_interface(tp->intf) < 0)
-		return;
-
 	r8152_mdio_write(tp, reg, val);
-
-	usb_autopm_put_interface(tp->intf);
 }
 
 static int
@@ -989,12 +978,16 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
 	if (ret < 0)
 		goto out1;
 
+	mutex_lock(&tp->control);
+
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 
 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
 	pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
 
+	mutex_unlock(&tp->control);
+
 	usb_autopm_put_interface(tp->intf);
 out1:
 	return ret;
@@ -2145,6 +2138,13 @@ static int rtl8152_set_features(struct net_device *dev,
 {
 	netdev_features_t changed = features ^ dev->features;
 	struct r8152 *tp = netdev_priv(dev);
+	int ret;
+
+	ret = usb_autopm_get_interface(tp->intf);
+	if (ret < 0)
+		goto out;
+
+	mutex_lock(&tp->control);
 
 
 	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -2153,7 +2153,12 @@ static int rtl8152_set_features(struct net_device *dev,
 			rtl_rx_vlan_en(tp, false);
 	}
 
-	return 0;
+	mutex_unlock(&tp->control);
+
+	usb_autopm_put_interface(tp->intf);
+
+out:
+	return ret;
 }
 
 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
@@ -2851,6 +2856,11 @@ static void rtl_work_func_t(struct work_struct *work)
 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
 		goto out1;
 
+	if (!mutex_trylock(&tp->control)) {
+		schedule_delayed_work(&tp->schedule, 0);
+		goto out1;
+	}
+
 	if (test_bit(RTL8152_LINK_CHG, &tp->flags))
 		set_carrier(tp);
 
@@ -2866,6 +2876,8 @@ static void rtl_work_func_t(struct work_struct *work)
 	if (test_bit(PHY_RESET, &tp->flags))
 		rtl_phy_reset(tp);
 
+	mutex_unlock(&tp->control);
+
 out1:
 	usb_autopm_put_interface(tp->intf);
 }
@@ -2885,6 +2897,8 @@ static int rtl8152_open(struct net_device *netdev)
 		goto out;
 	}
 
+	mutex_lock(&tp->control);
+
 	/* The WORK_ENABLE may be set when autoresume occurs */
 	if (test_bit(WORK_ENABLE, &tp->flags)) {
 		clear_bit(WORK_ENABLE, &tp->flags);
@@ -2913,6 +2927,8 @@ static int rtl8152_open(struct net_device *netdev)
 		free_all_mem(tp);
 	}
 
+	mutex_unlock(&tp->control);
+
 	usb_autopm_put_interface(tp->intf);
 
 out:
@@ -2933,6 +2949,8 @@ static int rtl8152_close(struct net_device *netdev)
 	if (res < 0) {
 		rtl_drop_queued_tx(tp);
 	} else {
+		mutex_lock(&tp->control);
+
 		/* The autosuspend may have been enabled and wouldn't
 		 * be disable when autoresume occurs, because the
 		 * netif_running() would be false.
@@ -2945,6 +2963,9 @@ static int rtl8152_close(struct net_device *netdev)
 		tasklet_disable(&tp->tl);
 		tp->rtl_ops.down(tp);
 		tasklet_enable(&tp->tl);
+
+		mutex_unlock(&tp->control);
+
 		usb_autopm_put_interface(tp->intf);
 	}
 
@@ -3169,6 +3190,8 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	struct r8152 *tp = usb_get_intfdata(intf);
 
+	mutex_lock(&tp->control);
+
 	if (PMSG_IS_AUTO(message))
 		set_bit(SELECTIVE_SUSPEND, &tp->flags);
 	else
@@ -3188,6 +3211,8 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
 		tasklet_enable(&tp->tl);
 	}
 
+	mutex_unlock(&tp->control);
+
 	return 0;
 }
 
@@ -3195,6 +3220,8 @@ static int rtl8152_resume(struct usb_interface *intf)
 {
 	struct r8152 *tp = usb_get_intfdata(intf);
 
+	mutex_lock(&tp->control);
+
 	if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
 		tp->rtl_ops.init(tp);
 		netif_device_attach(tp->netdev);
@@ -3220,6 +3247,8 @@ static int rtl8152_resume(struct usb_interface *intf)
 		usb_submit_urb(tp->intr_urb, GFP_KERNEL);
 	}
 
+	mutex_unlock(&tp->control);
+
 	return 0;
 }
 
@@ -3230,9 +3259,13 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	if (usb_autopm_get_interface(tp->intf) < 0)
 		return;
 
+	mutex_lock(&tp->control);
+
 	wol->supported = WAKE_ANY;
 	wol->wolopts = __rtl_get_wol(tp);
 
+	mutex_unlock(&tp->control);
+
 	usb_autopm_put_interface(tp->intf);
 }
 
@@ -3245,9 +3278,13 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	if (ret < 0)
 		goto out_set_wol;
 
+	mutex_lock(&tp->control);
+
 	__rtl_set_wol(tp, wol->wolopts);
 	tp->saved_wolopts = wol->wolopts & WAKE_ANY;
 
+	mutex_unlock(&tp->control);
+
 	usb_autopm_put_interface(tp->intf);
 
 out_set_wol:
@@ -3282,11 +3319,25 @@ static
 int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
 {
 	struct r8152 *tp = netdev_priv(netdev);
+	int ret;
 
 
 	if (!tp->mii.mdio_read)
 		return -EOPNOTSUPP;
 
-	return mii_ethtool_gset(&tp->mii, cmd);
+	ret = usb_autopm_get_interface(tp->intf);
+	if (ret < 0)
+		goto out;
+
+	mutex_lock(&tp->control);
+
+	ret = mii_ethtool_gset(&tp->mii, cmd);
+
+	mutex_unlock(&tp->control);
+
+	usb_autopm_put_interface(tp->intf);
+
+out:
+	return ret;
 }
 
 static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -3298,8 +3349,12 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (ret < 0)
 		goto out;
 
+	mutex_lock(&tp->control);
+
 	ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
 
+	mutex_unlock(&tp->control);
+
 	usb_autopm_put_interface(tp->intf);
 
 out:
@@ -3459,8 +3514,12 @@ rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
 	if (ret < 0)
 		goto out;
 
+	mutex_lock(&tp->control);
+
 	ret = tp->rtl_ops.eee_get(tp, edata);
 
+	mutex_unlock(&tp->control);
+
 	usb_autopm_put_interface(tp->intf);
 
 out:
@@ -3477,10 +3536,14 @@ rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata)
 	if (ret < 0)
 		goto out;
 
+	mutex_lock(&tp->control);
+
 	ret = tp->rtl_ops.eee_set(tp, edata);
 	if (!ret)
 		ret = mii_nway_restart(&tp->mii);
 
+	mutex_unlock(&tp->control);
+
 	usb_autopm_put_interface(tp->intf);
 
 out:
@@ -3522,7 +3585,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 		break;
 
 	case SIOCGMIIREG:
+		mutex_lock(&tp->control);
 		data->val_out = r8152_mdio_read(tp, data->reg_num);
+		mutex_unlock(&tp->control);
 		break;
 
 	case SIOCSMIIREG:
@@ -3530,7 +3595,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 			res = -EPERM;
 			break;
 		}
+		mutex_lock(&tp->control);
 		r8152_mdio_write(tp, data->reg_num, data->val_in);
+		mutex_unlock(&tp->control);
 		break;
 
 	default:
@@ -3723,6 +3790,7 @@ static int rtl8152_probe(struct usb_interface *intf,
 		goto out;
 
 	tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
+	mutex_init(&tp->control);
 	INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
 
 	netdev->netdev_ops = &rtl8152_netdev_ops;

+ 3 - 1
drivers/net/wireless/ath/ath9k/ath9k.h

@@ -294,7 +294,6 @@ struct ath_tx_control {
  *  (axq_qnum).
  */
 struct ath_tx {
-	u16 seq_no;
 	u32 txqsetup;
 	spinlock_t txbuflock;
 	struct list_head txbuf;
@@ -563,6 +562,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs);
 int ath_txq_update(struct ath_softc *sc, int qnum,
 		   struct ath9k_tx_queue_info *q);
 void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
+void ath_assign_seq(struct ath_common *common, struct sk_buff *skb);
 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		 struct ath_tx_control *txctl);
 void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -592,6 +592,8 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 struct ath_vif {
 	struct list_head list;
 
+	u16 seq_no;
+
 	/* BSS info */
 	u8 bssid[ETH_ALEN];
 	u16 aid;

+ 2 - 10
drivers/net/wireless/ath/ath9k/beacon.c

@@ -144,16 +144,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
 	mgmt_hdr->u.beacon.timestamp = avp->tsf_adjust;
 
 	info = IEEE80211_SKB_CB(skb);
-	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
-		/*
-		 * TODO: make sure the seq# gets assigned properly (vs. other
-		 * TX frames)
-		 */
-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-		sc->tx.seq_no += 0x10;
-		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
-	}
+
+	ath_assign_seq(common, skb);
 
 
 	if (vif->p2p)
 		ath9k_beacon_add_noa(sc, avp, skb);

+ 1 - 0
drivers/net/wireless/ath/ath9k/htc_drv_init.c

@@ -464,6 +464,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
 		return -ENOMEM;
 
 	ah->dev = priv->dev;
+	ah->hw = priv->hw;
 	ah->hw_version.devid = devid;
 	ah->hw_version.usbdev = drv_info;
 	ah->ah_flags |= AH_USE_EEPROM;

+ 1 - 1
drivers/net/wireless/ath/ath9k/main.c

@@ -2332,7 +2332,7 @@ static void ath9k_remove_chanctx(struct ieee80211_hw *hw,
 		conf->def.chan->center_freq);
 
 	ctx->assigned = false;
-	ctx->hw_queue_base = -1;
+	ctx->hw_queue_base = 0;
 	ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN);
 
 	mutex_unlock(&sc->mutex);

+ 7 - 1
drivers/net/wireless/ath/ath9k/tx99.c

@@ -54,6 +54,12 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_tx_info *tx_info;
 	struct sk_buff *skb;
+	struct ath_vif *avp;
+
+	if (!sc->tx99_vif)
+		return NULL;
+
+	avp = (struct ath_vif *)sc->tx99_vif->drv_priv;
 
 
 	skb = alloc_skb(len, GFP_KERNEL);
 	if (!skb)
@@ -71,7 +77,7 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
 	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
 	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
 
-	hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+	hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
 
 
 	tx_info = IEEE80211_SKB_CB(skb);
 	memset(tx_info, 0, sizeof(*tx_info));

+ 23 - 11
drivers/net/wireless/ath/ath9k/xmit.c

@@ -2139,6 +2139,28 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 	return bf;
 }
 
+void ath_assign_seq(struct ath_common *common, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_vif *vif = info->control.vif;
+	struct ath_vif *avp;
+
+	if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+		return;
+
+	if (!vif)
+		return;
+
+	avp = (struct ath_vif *)vif->drv_priv;
+
+	if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+		avp->seq_no += 0x10;
+
+	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+	hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+}
+
 static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
 			  struct ath_tx_control *txctl)
 {
@@ -2162,17 +2184,7 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
 	if (info->control.hw_key)
 		frmlen += info->control.hw_key->icv_len;
 
-	/*
-	 * As a temporary workaround, assign seq# here; this will likely need
-	 * to be cleaned up to work better with Beacon transmission and virtual
-	 * BSSes.
-	 */
-	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
-		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
-			sc->tx.seq_no += 0x10;
-		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
-	}
+	ath_assign_seq(ath9k_hw_common(sc->sc_ah), skb);
 
 
 	if ((vif && vif->type != NL80211_IFTYPE_AP &&
 	            vif->type != NL80211_IFTYPE_AP_VLAN) ||

+ 4 - 4
drivers/net/wireless/ath/main.c

@@ -79,13 +79,13 @@ void ath_printk(const char *level, const struct ath_common* common,
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	if (common && common->hw && common->hw->wiphy)
+	if (common && common->hw && common->hw->wiphy) {
 		printk("%sath: %s: %pV",
 		       level, wiphy_name(common->hw->wiphy), &vaf);
-	else
+		trace_ath_log(common->hw->wiphy, &vaf);
+	} else {
 		printk("%sath: %pV", level, &vaf);
-
-	trace_ath_log(common->hw->wiphy, &vaf);
+	}
 
 
 	va_end(args);
 }

+ 25 - 11
drivers/net/wireless/rtl818x/rtl8180/dev.c

@@ -742,35 +742,49 @@ static void rtl8180_int_disable(struct ieee80211_hw *dev)
 }
 
 static void rtl8180_conf_basic_rates(struct ieee80211_hw *dev,
-			    u32 rates_mask)
+			    u32 basic_mask)
 {
 	struct rtl8180_priv *priv = dev->priv;
-
-	u8 max, min;
 	u16 reg;
-
-	max = fls(rates_mask) - 1;
-	min = ffs(rates_mask) - 1;
+	u32 resp_mask;
+	u8 basic_max;
+	u8 resp_max, resp_min;
+
+	resp_mask = basic_mask;
+	/* IEEE80211 says the response rate should be equal to the highest basic
+	 * rate that is not faster than received frame. But it says also that if
+	 * the basic rate set does not contains any rate for the current
+	 * modulation class then mandatory rate set must be used for that
+	 * modulation class. Eventually add OFDM mandatory rates..
+	 */
+	if ((resp_mask & 0xf) == resp_mask)
+		resp_mask |= 0x150; /* 6, 12, 24Mbps */
 
 
 	switch (priv->chip_family) {
 
 	case RTL818X_CHIP_FAMILY_RTL8180:
 		/* in 8180 this is NOT a BITMAP */
+		basic_max = fls(basic_mask) - 1;
 		reg = rtl818x_ioread16(priv, &priv->map->BRSR);
 		reg &= ~3;
-		reg |= max;
+		reg |= basic_max;
 		rtl818x_iowrite16(priv, &priv->map->BRSR, reg);
 		break;
 
 	case RTL818X_CHIP_FAMILY_RTL8185:
+		resp_max = fls(resp_mask) - 1;
+		resp_min = ffs(resp_mask) - 1;
 		/* in 8185 this is a BITMAP */
-		rtl818x_iowrite16(priv, &priv->map->BRSR, rates_mask);
-		rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (max << 4) | min);
+		rtl818x_iowrite16(priv, &priv->map->BRSR, basic_mask);
+		rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (resp_max << 4) |
+				resp_min);
 		break;
 
 	case RTL818X_CHIP_FAMILY_RTL8187SE:
-		/* in 8187se this is a BITMAP */
-		rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, rates_mask);
+		/* in 8187se this is a BITMAP. BRSR reg actually sets
+		 * response rates.
+		 */
+		rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, resp_mask);
 		break;
 	}
 }

+ 1 - 1
drivers/net/wireless/rtlwifi/wifi.h

@@ -1370,7 +1370,7 @@ struct rtl_mac {
 	bool rdg_en;
 
 	/*AP*/
-	u8 bssid[6];
+	u8 bssid[ETH_ALEN] __aligned(2);
 	u32 vendor;
 	u8 mcs[16];	/* 16 bytes mcs for HT rates. */
 	u32 basic_rates; /* b/g rates */

+ 1 - 0
include/linux/micrel_phy.h

@@ -37,6 +37,7 @@
 
 
 /* struct phy_device dev_flags definitions */
 #define MICREL_PHY_50MHZ_CLK	0x00000001
+#define MICREL_PHY_25MHZ_CLK	0x00000002
 
 
 #define MICREL_KSZ9021_EXTREG_CTRL	0xB
 #define MICREL_KSZ9021_EXTREG_DATA_WRITE	0xC

+ 2 - 155
include/net/netfilter/ipv6/nf_reject.h

@@ -1,11 +1,7 @@
 #ifndef _IPV6_NF_REJECT_H
 #define _IPV6_NF_REJECT_H
 
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_checksum.h>
-#include <linux/netfilter_ipv6.h>
+#include <linux/icmpv6.h>
 
 
 static inline void
 nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
@@ -17,155 +13,6 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
 	icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
 }
 
-/* Send RST reply */
-static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
-{
-	struct sk_buff *nskb;
-	struct tcphdr otcph, *tcph;
-	unsigned int otcplen, hh_len;
-	int tcphoff, needs_ack;
-	const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
-	struct ipv6hdr *ip6h;
-#define DEFAULT_TOS_VALUE	0x0U
-	const __u8 tclass = DEFAULT_TOS_VALUE;
-	struct dst_entry *dst = NULL;
-	u8 proto;
-	__be16 frag_off;
-	struct flowi6 fl6;
-
-	if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
-	    (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
-		pr_debug("addr is not unicast.\n");
-		return;
-	}
-
-	proto = oip6h->nexthdr;
-	tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
-
-	if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
-		pr_debug("Cannot get TCP header.\n");
-		return;
-	}
-
-	otcplen = oldskb->len - tcphoff;
-
-	/* IP header checks: fragment, too short. */
-	if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
-		pr_debug("proto(%d) != IPPROTO_TCP, "
-			 "or too short. otcplen = %d\n",
-			 proto, otcplen);
-		return;
-	}
-
-	if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
-		BUG();
-
-	/* No RST for RST. */
-	if (otcph.rst) {
-		pr_debug("RST is set\n");
-		return;
-	}
-
-	/* Check checksum. */
-	if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
-		pr_debug("TCP checksum is invalid\n");
-		return;
-	}
-
-	memset(&fl6, 0, sizeof(fl6));
-	fl6.flowi6_proto = IPPROTO_TCP;
-	fl6.saddr = oip6h->daddr;
-	fl6.daddr = oip6h->saddr;
-	fl6.fl6_sport = otcph.dest;
-	fl6.fl6_dport = otcph.source;
-	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
-	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == NULL || dst->error) {
-		dst_release(dst);
-		return;
-	}
-	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
-	if (IS_ERR(dst))
-		return;
-
-	hh_len = (dst->dev->hard_header_len + 15)&~15;
-	nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
-			 + sizeof(struct tcphdr) + dst->trailer_len,
-			 GFP_ATOMIC);
-
-	if (!nskb) {
-		net_dbg_ratelimited("cannot alloc skb\n");
-		dst_release(dst);
-		return;
-	}
-
-	skb_dst_set(nskb, dst);
-
-	skb_reserve(nskb, hh_len + dst->header_len);
-
-	skb_put(nskb, sizeof(struct ipv6hdr));
-	skb_reset_network_header(nskb);
-	ip6h = ipv6_hdr(nskb);
-	ip6_flow_hdr(ip6h, tclass, 0);
-	ip6h->hop_limit = ip6_dst_hoplimit(dst);
-	ip6h->nexthdr = IPPROTO_TCP;
-	ip6h->saddr = oip6h->daddr;
-	ip6h->daddr = oip6h->saddr;
-
-	skb_reset_transport_header(nskb);
-	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
-	/* Truncate to length (no data) */
-	tcph->doff = sizeof(struct tcphdr)/4;
-	tcph->source = otcph.dest;
-	tcph->dest = otcph.source;
-
-	if (otcph.ack) {
-		needs_ack = 0;
-		tcph->seq = otcph.ack_seq;
-		tcph->ack_seq = 0;
-	} else {
-		needs_ack = 1;
-		tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
-				      + otcplen - (otcph.doff<<2));
-		tcph->seq = 0;
-	}
-
-	/* Reset flags */
-	((u_int8_t *)tcph)[13] = 0;
-	tcph->rst = 1;
-	tcph->ack = needs_ack;
-	tcph->window = 0;
-	tcph->urg_ptr = 0;
-	tcph->check = 0;
-
-	/* Adjust TCP checksum */
-	tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
-				      &ipv6_hdr(nskb)->daddr,
-				      sizeof(struct tcphdr), IPPROTO_TCP,
-				      csum_partial(tcph,
-						   sizeof(struct tcphdr), 0));
-
-	nf_ct_attach(nskb, oldskb);
-
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	/* If we use ip6_local_out for bridged traffic, the MAC source on
-	 * the RST will be ours, instead of the destination's.  This confuses
-	 * some routers/firewalls, and they drop the packet.  So we need to
-	 * build the eth header using the original destination's MAC as the
-	 * source, and send the RST packet directly.
-	 */
-	if (oldskb->nf_bridge) {
-		struct ethhdr *oeth = eth_hdr(oldskb);
-		nskb->dev = oldskb->nf_bridge->physindev;
-		nskb->protocol = htons(ETH_P_IPV6);
-		ip6h->payload_len = htons(sizeof(struct tcphdr));
-		if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
-				    oeth->h_source, oeth->h_dest, nskb->len) < 0)
-			return;
-		dev_queue_xmit(nskb);
-	} else
-#endif
-		ip6_local_out(nskb);
-}
+void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
 
 
 #endif /* _IPV6_NF_REJECT_H */

+ 1 - 1
include/uapi/linux/netfilter/nf_tables.h

@@ -774,7 +774,7 @@ enum nft_reject_inet_code {
 	NFT_REJECT_ICMPX_ADMIN_PROHIBITED,
 	__NFT_REJECT_ICMPX_MAX
 };
-#define NFT_REJECT_ICMPX_MAX	(__NFT_REJECT_ICMPX_MAX + 1)
+#define NFT_REJECT_ICMPX_MAX	(__NFT_REJECT_ICMPX_MAX - 1)
 
 
 /**
  * enum nft_reject_attributes - nf_tables reject expression netlink attributes

+ 1 - 0
net/Kconfig

@@ -6,6 +6,7 @@ menuconfig NET
 	bool "Networking support"
 	select NLATTR
 	select GENERIC_NET_UTILS
+	select ANON_INODES
 	---help---
 	  Unless you really know what you are doing, you should say Y here.
 	  The reason is that some programs need kernel networking support even

+ 3 - 6
net/core/filter.c

@@ -51,9 +51,9 @@
  *	@skb: buffer to filter
  *
  * Run the filter code and then cut skb->data to correct size returned by
- * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
+ * SK_RUN_FILTER. If pkt_len is 0 we toss packet. If skb->len is smaller
  * than pkt_len we keep whole skb->data. This is the socket level
- * wrapper to sk_run_filter. It returns 0 if the packet should
+ * wrapper to SK_RUN_FILTER. It returns 0 if the packet should
  * be accepted or -EPERM if the packet should be tossed.
  *
  */
@@ -565,12 +565,9 @@ err:
 }
 
 /* Security:
- *
- * A BPF program is able to use 16 cells of memory to store intermediate
- * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
  *
  * As we dont want to clear mem[] array for each packet going through
- * sk_run_filter(), we check that filter loaded by user never try to read
+ * __bpf_prog_run(), we check that filter loaded by user never try to read
  * a cell if not previously written, and we check all branches to be sure
  * a malicious user doesn't try to abuse us.
  */

+ 23 - 13
net/core/flow_dissector.c

@@ -100,6 +100,13 @@ ip:
 		if (ip_is_fragment(iph))
 			ip_proto = 0;
 
+		/* skip the address processing if skb is NULL.  The assumption
+		 * here is that if there is no skb we are not looking for flow
+		 * info but lengths and protocols.
+		 */
+		if (!skb)
+			break;
+
 		iph_to_flow_copy_addrs(flow, iph);
 		break;
 	}
@@ -114,17 +121,15 @@ ipv6:
 			return false;
 
 		ip_proto = iph->nexthdr;
-		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
-		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
 		nhoff += sizeof(struct ipv6hdr);
 
-		/* skip the flow label processing if skb is NULL.  The
-		 * assumption here is that if there is no skb we are not
-		 * looking for flow info as much as we are length.
-		 */
+		/* see comment above in IPv4 section */
 		if (!skb)
 			break;
 
+		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
+		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
+
 		flow_label = ip6_flowlabel(iph);
 		if (flow_label) {
 			/* Awesome, IPv6 packet has a flow label so we can
@@ -231,9 +236,13 @@ ipv6:
 
 
 	flow->n_proto = proto;
 	flow->ip_proto = ip_proto;
-	flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen);
 	flow->thoff = (u16) nhoff;
 
+	/* unless skb is set we don't need to record port info */
+	if (skb)
+		flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
+						   data, hlen);
+
 	return true;
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
@@ -334,15 +343,16 @@ u32 __skb_get_poff(const struct sk_buff *skb, void *data,
 
 	switch (keys->ip_proto) {
 	case IPPROTO_TCP: {
-		const struct tcphdr *tcph;
-		struct tcphdr _tcph;
+		/* access doff as u8 to avoid unaligned access */
+		const u8 *doff;
+		u8 _doff;
 
-		tcph = __skb_header_pointer(skb, poff, sizeof(_tcph),
-					    data, hlen, &_tcph);
-		if (!tcph)
+		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
+					    data, hlen, &_doff);
+		if (!doff)
 			return poff;
 
-		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
+		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
 		break;
 	}
 	case IPPROTO_UDP:
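The new lookup leans on TCP header layout: byte 12 carries the data offset in its high nibble, counted in 32-bit words, so one single-byte load (which can never be misaligned) is enough to compute the header length. A stand-alone sketch of that arithmetic in plain user-space C, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* hdr_len = ((byte12 & 0xF0) >> 4) * 4 == (byte12 & 0xF0) >> 2 */
static unsigned int tcp_hdrlen_from_doff_byte(uint8_t doff_byte)
{
	return (doff_byte & 0xF0) >> 2;	/* byte load only, never unaligned */
}

int main(void)
{
	/* 0x50: doff = 5 words -> 20 bytes; 0x80: doff = 8 words -> 32 bytes */
	printf("%u %u\n", tcp_hdrlen_from_doff_byte(0x50),
	       tcp_hdrlen_from_doff_byte(0x80));
	return 0;
}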

+ 23 - 12
net/core/skbuff.c

@@ -360,18 +360,29 @@ refill:
 				goto end;
 		}
 		nc->frag.size = PAGE_SIZE << order;
-recycle:
-		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
+		/* Even if we own the page, we do not use atomic_set().
+		 * This would break get_page_unless_zero() users.
+		 */
+		atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
+			   &nc->frag.page->_count);
 		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
 		nc->frag.offset = 0;
 	}
 
 	if (nc->frag.offset + fragsz > nc->frag.size) {
-		/* avoid unnecessary locked operations if possible */
-		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
-		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
-			goto recycle;
-		goto refill;
+		if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
+			if (!atomic_sub_and_test(nc->pagecnt_bias,
+						 &nc->frag.page->_count))
+				goto refill;
+			/* OK, page count is 0, we can safely set it */
+			atomic_set(&nc->frag.page->_count,
+				   NETDEV_PAGECNT_MAX_BIAS);
+		} else {
+			atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
+				   &nc->frag.page->_count);
+		}
+		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
+		nc->frag.offset = 0;
 	}
 
 	data = page_address(nc->frag.page) + nc->frag.offset;
@@ -4126,11 +4137,11 @@ EXPORT_SYMBOL(skb_vlan_untag);
 /**
  * alloc_skb_with_frags - allocate skb with page frags
  *
- * header_len: size of linear part
- * data_len: needed length in frags
- * max_page_order: max page order desired.
- * errcode: pointer to error code if any
- * gfp_mask: allocation mask
+ * @header_len: size of linear part
+ * @data_len: needed length in frags
+ * @max_page_order: max page order desired.
+ * @errcode: pointer to error code if any
+ * @gfp_mask: allocation mask
  *
  * This can be used to allocate a paged skb, given a maximal order for frags.
  */
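The refill hunk above only uses atomic_set() once the page count has provably dropped to zero, and tops it up with atomic_add() otherwise, so a racing get_page_unless_zero() (increment-if-nonzero) can never have its reference overwritten by a blind store. A compact sketch of the same idea with C11 atomics rather than the kernel's atomic_t API; the bias value is illustrative:

#include <stdatomic.h>
#include <stdio.h>

#define PAGECNT_BIAS 128	/* stand-in for NETDEV_PAGECNT_MAX_BIAS */

/* Returns 1 if the cached page may be recycled, 0 if a fresh one is needed. */
static int recharge_refcount(atomic_int *count, int our_bias)
{
	if (atomic_load(count) == our_bias) {
		/* We hold every reference: top the count up with an add so a
		 * concurrent increment-if-nonzero is preserved, never lost. */
		atomic_fetch_add(count, PAGECNT_BIAS - our_bias);
		return 1;
	}
	/* Drop our references; only if that takes the count to zero is a
	 * plain store safe, because a zero count cannot be revived. */
	if (atomic_fetch_sub(count, our_bias) == our_bias) {
		atomic_store(count, PAGECNT_BIAS);
		return 1;
	}
	return 0;	/* someone else still holds the page */
}

int main(void)
{
	atomic_int count = PAGECNT_BIAS;

	printf("recycle=%d count=%d\n",
	       recharge_refcount(&count, PAGECNT_BIAS), atomic_load(&count));
	return 0;
}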

+ 4 - 6
net/netfilter/nft_reject.c

@@ -72,7 +72,7 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_reject_dump);
 
-static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX] = {
+static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX + 1] = {
 	[NFT_REJECT_ICMPX_NO_ROUTE]		= ICMP_NET_UNREACH,
 	[NFT_REJECT_ICMPX_PORT_UNREACH]		= ICMP_PORT_UNREACH,
 	[NFT_REJECT_ICMPX_HOST_UNREACH]		= ICMP_HOST_UNREACH,
@@ -81,8 +81,7 @@ static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX] = {
 
 int nft_reject_icmp_code(u8 code)
 {
-	if (code > NFT_REJECT_ICMPX_MAX)
-		return -EINVAL;
+	BUG_ON(code > NFT_REJECT_ICMPX_MAX);
 
 	return icmp_code_v4[code];
 }
@@ -90,7 +89,7 @@ int nft_reject_icmp_code(u8 code)
 EXPORT_SYMBOL_GPL(nft_reject_icmp_code);
 
 
-static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX] = {
+static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX + 1] = {
 	[NFT_REJECT_ICMPX_NO_ROUTE]		= ICMPV6_NOROUTE,
 	[NFT_REJECT_ICMPX_PORT_UNREACH]		= ICMPV6_PORT_UNREACH,
 	[NFT_REJECT_ICMPX_HOST_UNREACH]		= ICMPV6_ADDR_UNREACH,
@@ -99,8 +98,7 @@ static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX] = {
 
 int nft_reject_icmpv6_code(u8 code)
 {
-	if (code > NFT_REJECT_ICMPX_MAX)
-		return -EINVAL;
+	BUG_ON(code > NFT_REJECT_ICMPX_MAX);
 
 	return icmp_code_v6[code];
 }
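Both tables are indexed with every valid NFT_REJECT_ICMPX_* value, including NFT_REJECT_ICMPX_MAX itself, so they need MAX + 1 slots; the old size left the last lookup one element past the end. A tiny illustration of the sizing rule, with an illustrative enum and values rather than the kernel definitions:

#include <stdio.h>

enum { ICMPX_NO_ROUTE, ICMPX_PORT_UNREACH, ICMPX_HOST_UNREACH,
       ICMPX_ADMIN_PROHIBITED, ICMPX_MAX = ICMPX_ADMIN_PROHIBITED };

/* one slot per valid code, including ICMPX_MAX itself;
 * a [ICMPX_MAX]-sized array would only cover indices 0 .. ICMPX_MAX-1 */
static const unsigned char code_map[ICMPX_MAX + 1] = {
	[ICMPX_NO_ROUTE]         = 0,
	[ICMPX_PORT_UNREACH]     = 3,
	[ICMPX_HOST_UNREACH]     = 1,
	[ICMPX_ADMIN_PROHIBITED] = 13,
};

int main(void)
{
	/* with the smaller array this lookup would read out of bounds */
	printf("%d\n", code_map[ICMPX_MAX]);
	return 0;
}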

+ 0 - 1
net/netlabel/netlabel_kapi.c

@@ -246,7 +246,6 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
  * @addr: IP address in network byte order (struct in[6]_addr)
  * @mask: address mask in network byte order (struct in[6]_addr)
  * @family: address family
- * @secid: LSM secid value for the entry
  * @audit_info: NetLabel audit information
  *
  * Description:

+ 2 - 2
net/rfkill/core.c

@@ -329,7 +329,7 @@ static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
 /**
  * __rfkill_switch_all - Toggle state of all switches of given type
  * @type: type of interfaces to be affected
- * @state: the new state
+ * @blocked: the new state
  *
  * This function sets the state of all switches of given type,
  * unless a specific switch is claimed by userspace (in which case,
@@ -353,7 +353,7 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
 /**
  * rfkill_switch_all - Toggle state of all switches of given type
  * @type: type of interfaces to be affected
- * @state: the new state
+ * @blocked: the new state
  *
  * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
  * Please refer to __rfkill_switch_all() for details.

+ 13 - 7
net/sched/sch_generic.c

@@ -57,7 +57,8 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 
 static void try_bulk_dequeue_skb(struct Qdisc *q,
 				 struct sk_buff *skb,
-				 const struct netdev_queue *txq)
+				 const struct netdev_queue *txq,
+				 int *packets)
 {
 	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 
@@ -70,6 +71,7 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
 		bytelimit -= nskb->len; /* covers GSO len */
 		skb->next = nskb;
 		skb = nskb;
+		(*packets)++; /* GSO counts as one pkt */
 	}
 	skb->next = NULL;
 }
@@ -77,11 +79,13 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
  * A requeued skb (via q->gso_skb) can also be a SKB list.
  */
-static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
+static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
+				   int *packets)
 {
 	struct sk_buff *skb = q->gso_skb;
 	const struct netdev_queue *txq = q->dev_queue;
 
+	*packets = 1;
 	*validate = true;
 	if (unlikely(skb)) {
 		/* check the reason of requeuing without tx lock first */
@@ -98,7 +102,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
 		    !netif_xmit_frozen_or_stopped(txq)) {
 			skb = q->dequeue(q);
 			if (skb && qdisc_may_bulk(q))
-				try_bulk_dequeue_skb(q, skb, txq);
+				try_bulk_dequeue_skb(q, skb, txq, packets);
 		}
 	}
 	return skb;
@@ -204,7 +208,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 *				>0 - queue is not empty.
 *
 */
-static inline int qdisc_restart(struct Qdisc *q)
+static inline int qdisc_restart(struct Qdisc *q, int *packets)
 {
 	struct netdev_queue *txq;
 	struct net_device *dev;
@@ -213,7 +217,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	bool validate;
 
 	/* Dequeue packet */
-	skb = dequeue_skb(q, &validate);
+	skb = dequeue_skb(q, &validate, packets);
 	if (unlikely(!skb))
 		return 0;
 
@@ -227,14 +231,16 @@ static inline int qdisc_restart(struct Qdisc *q)
 void __qdisc_run(struct Qdisc *q)
 {
 	int quota = weight_p;
+	int packets;
 
-	while (qdisc_restart(q)) {
+	while (qdisc_restart(q, &packets)) {
 		/*
 		 * Ordered by possible occurrence: Postpone processing if
 		 * 1. we've exceeded packet quota
 		 * 2. another process needs the CPU;
 		 */
-		if (--quota <= 0 || need_resched()) {
+		quota -= packets;
+		if (quota <= 0 || need_resched()) {
 			__netif_schedule(q);
 			break;
 		}
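With bulk dequeue, a single qdisc_restart() can now hand several packets to the driver, so the quota has to be charged per packet rather than per loop iteration; otherwise one __qdisc_run() pass could send far more than weight_p packets and starve other queues. A stand-alone sketch of that accounting, where the helper, constant and burst size are illustrative stand-ins rather than kernel API:

#include <stdbool.h>
#include <stdio.h>

#define WEIGHT_P 64	/* stand-in for the kernel's weight_p budget */

/* Hypothetical stand-in for qdisc_restart(): pretend every call bulk-dequeues
 * an 8-packet burst and that the queue never runs empty for this demo. */
static bool dequeue_one_burst(int *packets)
{
	*packets = 8;
	return true;
}

static void run_queue(void)
{
	int quota = WEIGHT_P;
	int packets;
	int sent = 0;

	while (dequeue_one_burst(&packets)) {
		sent += packets;
		quota -= packets;	/* charge the whole burst, not just 1 */
		if (quota <= 0) {	/* budget spent: yield for fairness */
			printf("sent %d packets this pass, rescheduling\n", sent);
			break;
		}
	}
}

int main(void)
{
	run_queue();
	return 0;
}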