
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

This merges (3f509c6 netfilter: nf_nat_sip: fix incorrect handling
of EBUSY for RTCP expectation) to Patrick McHardy's IPv6 NAT changes.
Pablo Neira Ayuso 13 years ago
commit ace1fe1231
100 files changed, 2044 insertions, 1213 deletions
  1. Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt (+75 -0)
  2. Documentation/networking/batman-adv.txt (+4 -3)
  3. Documentation/networking/ip-sysctl.txt (+28 -9)
  4. Documentation/networking/stmmac.txt (+0 -5)
  5. Makefile (+1 -1)
  6. drivers/Makefile (+0 -1)
  7. drivers/acpi/acpica/tbxface.c (+1 -0)
  8. drivers/bcma/Kconfig (+2 -2)
  9. drivers/bcma/bcma_private.h (+2 -0)
 10. drivers/bcma/driver_chipcommon_nflash.c (+25 -3)
 11. drivers/bcma/driver_chipcommon_sflash.c (+120 -3)
 12. drivers/bcma/main.c (+17 -0)
 13. drivers/char/agp/intel-agp.h (+1 -0)
 14. drivers/char/agp/intel-gtt.c (+69 -36)
 15. drivers/gpu/drm/drm_modes.c (+0 -3)
 16. drivers/gpu/drm/drm_proc.c (+2 -2)
 17. drivers/gpu/drm/i915/i915_gem.c (+4 -4)
 18. drivers/gpu/drm/i915/i915_gem_gtt.c (+4 -1)
 19. drivers/gpu/drm/i915/i915_reg.h (+1 -0)
 20. drivers/gpu/drm/i915/intel_crt.c (+33 -3)
 21. drivers/gpu/drm/i915/intel_drv.h (+2 -0)
 22. drivers/gpu/drm/i915/intel_modes.c (+22 -9)
 23. drivers/gpu/drm/i915/intel_pm.c (+4 -11)
 24. drivers/gpu/drm/i915/intel_sdvo.c (+1 -0)
 25. drivers/gpu/drm/radeon/atombios_crtc.c (+21 -4)
 26. drivers/gpu/drm/radeon/r600_cs.c (+88 -17)
 27. drivers/gpu/drm/radeon/r600d.h (+17 -0)
 28. drivers/gpu/drm/radeon/radeon.h (+0 -15)
 29. drivers/gpu/drm/radeon/radeon_atombios.c (+1 -1)
 30. drivers/gpu/drm/radeon/radeon_atpx_handler.c (+1 -55)
 31. drivers/gpu/drm/radeon/radeon_bios.c (+134 -4)
 32. drivers/gpu/drm/radeon/radeon_drv.c (+2 -1)
 33. drivers/gpu/drm/radeon/radeon_object.c (+1 -2)
 34. drivers/gpu/drm/radeon/radeon_ring.c (+1 -0)
 35. drivers/gpu/drm/radeon/reg_srcs/r600 (+0 -8)
 36. drivers/gpu/drm/udl/udl_modeset.c (+1 -2)
 37. drivers/gpu/drm/vmwgfx/vmwgfx_kms.c (+5 -1)
 38. drivers/i2c/busses/i2c-diolan-u2c.c (+1 -0)
 39. drivers/i2c/busses/i2c-nomadik.c (+18 -10)
 40. drivers/i2c/busses/i2c-omap.c (+1 -1)
 41. drivers/i2c/busses/i2c-tegra.c (+1 -1)
 42. drivers/net/Kconfig (+2 -2)
 43. drivers/net/Makefile (+1 -0)
 44. drivers/net/bonding/bond_main.c (+20 -11)
 45. drivers/net/can/sja1000/sja1000_platform.c (+3 -1)
 46. drivers/net/can/softing/softing_fw.c (+4 -3)
 47. drivers/net/ethernet/broadcom/bnx2x/bnx2x.h (+0 -3)
 48. drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c (+4 -0)
 49. drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h (+2 -2)
 50. drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c (+0 -2)
 51. drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c (+9 -9)
 52. drivers/net/ethernet/emulex/benet/be.h (+1 -0)
 53. drivers/net/ethernet/emulex/benet/be_cmds.c (+5 -3)
 54. drivers/net/ethernet/emulex/benet/be_main.c (+14 -10)
 55. drivers/net/ethernet/freescale/Kconfig (+7 -0)
 56. drivers/net/ethernet/freescale/Makefile (+1 -0)
 57. drivers/net/ethernet/freescale/fsl_pq_mdio.c (+292 -257)
 58. drivers/net/ethernet/freescale/fsl_pq_mdio.h (+0 -52)
 59. drivers/net/ethernet/freescale/gianfar.c (+1 -2)
 60. drivers/net/ethernet/freescale/ucc_geth.c (+0 -1)
 61. drivers/net/ethernet/freescale/xgmac_mdio.c (+274 -0)
 62. drivers/net/ethernet/intel/e1000e/82571.c (+2 -2)
 63. drivers/net/ethernet/intel/e1000e/e1000.h (+1 -0)
 64. drivers/net/ethernet/intel/e1000e/ethtool.c (+2 -1)
 65. drivers/net/ethernet/intel/e1000e/netdev.c (+36 -31)
 66. drivers/net/ethernet/intel/ixgbevf/ixgbevf.h (+3 -1)
 67. drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c (+129 -31)
 68. drivers/net/ethernet/intel/ixgbevf/vf.c (+14 -0)
 69. drivers/net/ethernet/intel/ixgbevf/vf.h (+1 -0)
 70. drivers/net/ethernet/nvidia/forcedeth.c (+13 -4)
 71. drivers/net/ethernet/realtek/r8169.c (+2 -0)
 72. drivers/net/ethernet/sfc/efx.c (+133 -102)
 73. drivers/net/ethernet/sfc/ethtool.c (+3 -5)
 74. drivers/net/ethernet/sfc/falcon_boards.c (+1 -1)
 75. drivers/net/ethernet/sfc/net_driver.h (+26 -23)
 76. drivers/net/ethernet/sfc/nic.c (+4 -2)
 77. drivers/net/ethernet/sfc/tx.c (+245 -376)
 78. drivers/net/ethernet/stmicro/stmmac/common.h (+5 -0)
 79. drivers/net/ethernet/stmicro/stmmac/descs.h (+6 -0)
 80. drivers/net/ethernet/stmicro/stmmac/descs_com.h (+5 -0)
 81. drivers/net/ethernet/stmicro/stmmac/dwmac100.h (+5 -0)
 82. drivers/net/ethernet/stmicro/stmmac/dwmac1000.h (+4 -1)
 83. drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h (+5 -0)
 84. drivers/net/ethernet/stmicro/stmmac/mmc.h (+5 -0)
 85. drivers/net/ethernet/stmicro/stmmac/mmc_core.c (+3 -3)
 86. drivers/net/ethernet/stmicro/stmmac/stmmac.h (+5 -0)
 87. drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c (+6 -5)
 88. drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c (+0 -1)
 89. drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c (+8 -31)
 90. drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h (+4 -0)
 91. drivers/net/ethernet/tundra/tsi108_eth.c (+0 -1)
 92. drivers/net/ethernet/wiznet/w5100.c (+1 -2)
 93. drivers/net/ethernet/wiznet/w5300.c (+1 -2)
 94. drivers/net/ieee802154/Kconfig (+0 -0)
 95. drivers/net/ieee802154/Makefile (+0 -0)
 96. drivers/net/ieee802154/at86rf230.c (+1 -11)
 97. drivers/net/ieee802154/fakehard.c (+0 -1)
 98. drivers/net/ieee802154/fakelb.c (+0 -0)
 99. drivers/net/phy/Kconfig (+13 -0)
100. drivers/net/phy/Makefile (+1 -0)

+ 75 - 0
Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt

@@ -0,0 +1,75 @@
+Properties for an MDIO bus multiplexer controlled by a memory-mapped device
+
+This is a special case of a MDIO bus multiplexer.  A memory-mapped device,
+like an FPGA, is used to control which child bus is connected.  The mdio-mux
+node must be a child of the memory-mapped device.  The driver currently only
+supports devices with eight-bit registers.
+
+Required properties in addition to the generic multiplexer properties:
+
+- compatible : string, must contain "mdio-mux-mmioreg"
+
+- reg : integer, contains the offset of the register that controls the bus
+	multiplexer.  The size field in the 'reg' property is the size of
+	register, and must therefore be 1.
+
+- mux-mask : integer, contains an eight-bit mask that specifies which
+	bits in the register control the actual bus multiplexer.  The
+	'reg' property of each child mdio-mux node must be constrained by
+	this mask.
+
+Example:
+
+The FPGA node defines a memory-mapped FPGA with a register space of 0x30 bytes.
+For the "EMI2" MDIO bus, register 9 (BRDCFG1) controls the mux on that bus.
+A bitmask of 0x6 means that bits 1 and 2 (bit 0 is lsb) are the bits on
+BRDCFG1 that control the actual mux.
+
+	/* The FPGA node */
+	fpga: board-control@3,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,p5020ds-fpga", "fsl,fpga-ngpixis";
+		reg = <3 0 0x30>;
+		ranges = <0 3 0 0x30>;
+
+		mdio-mux-emi2 {
+			compatible = "mdio-mux-mmioreg", "mdio-mux";
+			mdio-parent-bus = <&xmdio0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <9 1>; // BRDCFG1
+			mux-mask = <0x6>; // EMI2
+
+			emi2_slot1: mdio@0 {	// Slot 1 XAUI (FM2)
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				phy_xgmii_slot1: ethernet-phy@0 {
+					compatible = "ethernet-phy-ieee802.3-c45";
+					reg = <4>;
+				};
+			};
+
+			emi2_slot2: mdio@2 {	// Slot 2 XAUI (FM1)
+				reg = <2>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				phy_xgmii_slot2: ethernet-phy@4 {
+					compatible = "ethernet-phy-ieee802.3-c45";
+					reg = <0>;
+				};
+			};
+		};
+	};
+
+	/* The parent MDIO bus. */
+	xmdio0: mdio@f1000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-xmdio";
+		reg = <0xf1000 0x1000>;
+		interrupts = <100 1 0 0>;
+	};
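
The binding above constrains 'reg' to a one-byte register and requires every child bus selector to fit inside 'mux-mask'. As a rough illustration (not the in-tree mdio-mux-mmioreg driver -- the helper name and flow here are hypothetical), a platform driver could validate those two properties with the generic OF helpers like this:

	/* Hypothetical sketch: validate the 'reg' and 'mux-mask' properties
	 * of the binding documented above, using standard OF helpers. */
	#include <linux/of.h>
	#include <linux/of_address.h>
	#include <linux/ioport.h>

	static int mmioreg_mux_parse(struct device_node *np)
	{
		struct resource res;
		u32 mask;
		int ret;

		/* 'reg' must describe a register whose size field is 1 byte */
		ret = of_address_to_resource(np, 0, &res);
		if (ret)
			return ret;
		if (resource_size(&res) != sizeof(u8))
			return -EINVAL;

		/* 'mux-mask' names the register bits that switch the bus */
		ret = of_property_read_u32(np, "mux-mask", &mask);
		if (ret)
			return ret;
		if (mask & ~0xff)
			return -EINVAL;	/* must fit the eight-bit register */

		return 0;
	}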

+ 4 - 3
Documentation/networking/batman-adv.txt

@@ -75,9 +75,10 @@ folder:
 
 There is a special folder for debugging information:
 
-#  ls /sys/kernel/debug/batman_adv/bat0/
-# bla_claim_table    log                socket             transtable_local
-# gateways           originators        transtable_global  vis_data
+# ls /sys/kernel/debug/batman_adv/bat0/
+# bla_backbone_table  log                 transtable_global
+# bla_claim_table     originators         transtable_local
+# gateways            socket              vis_data
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of

+ 28 - 9
Documentation/networking/ip-sysctl.txt

@@ -439,7 +439,9 @@ tcp_stdurg - BOOLEAN
 tcp_synack_retries - INTEGER
 	Number of times SYNACKs for a passive TCP connection attempt will
 	be retransmitted. Should not be higher than 255. Default value
-	is 5, which corresponds to ~180seconds.
+	is 5, which corresponds to 31seconds till the last retransmission
+	with the current initial RTO of 1second. With this the final timeout
+	for a passive TCP connection will happen after 63seconds.
 
 tcp_syncookies - BOOLEAN
 	Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
@@ -465,20 +467,37 @@ tcp_syncookies - BOOLEAN
 tcp_fastopen - INTEGER
 	Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
 	in the opening SYN packet. To use this feature, the client application
-	must not use connect(). Instead, it should use sendmsg() or sendto()
-	with MSG_FASTOPEN flag which performs a TCP handshake automatically.
-
-	The values (bitmap) are:
-	1: Enables sending data in the opening SYN on the client
-	5: Enables sending data in the opening SYN on the client regardless
-	   of cookie availability.
+	must use sendmsg() or sendto() with MSG_FASTOPEN flag rather than
+	connect() to perform a TCP handshake automatically.
+
+	The values (bitmap) are
+	1: Enables sending data in the opening SYN on the client.
+	2: Enables TCP Fast Open on the server side, i.e., allowing data in
+	   a SYN packet to be accepted and passed to the application before
+	   3-way hand shake finishes.
+	4: Send data in the opening SYN regardless of cookie availability and
+	   without a cookie option.
+	0x100: Accept SYN data w/o validating the cookie.
+	0x200: Accept data-in-SYN w/o any cookie option present.
+	0x400/0x800: Enable Fast Open on all listeners regardless of the
+	   TCP_FASTOPEN socket option. The two different flags designate two
+	   different ways of setting max_qlen without the TCP_FASTOPEN socket
+	   option.
 
 	Default: 0
 
+	Note that the client & server side Fast Open flags (1 and 2
+	respectively) must be also enabled before the rest of flags can take
+	effect.
+
+	See include/net/tcp.h and the code for more details.
+
 tcp_syn_retries - INTEGER
 	Number of times initial SYNs for an active TCP connection attempt
 	will be retransmitted. Should not be higher than 255. Default value
-	is 5, which corresponds to ~180seconds.
+	is 6, which corresponds to 63seconds till the last restransmission
+	with the current initial RTO of 1second. With this the final timeout
+	for an active TCP connection attempt will happen after 127seconds.
 
 tcp_timestamps - BOOLEAN
 	Enable timestamps as defined in RFC1323.
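
The retry arithmetic above follows from binary exponential backoff on the 1-second initial RTO: five SYN-ACK retransmissions fire at 1+2+4+8+16 = 31 s, and the final timeout lands one doubling later at 63 s; six SYNs likewise give 63 s and 127 s. For the tcp_fastopen text, the sketch below shows the client side it describes -- data is handed to sendto() with MSG_FASTOPEN instead of calling connect(). The address is a placeholder and error handling is elided; this is an illustration, not code from this commit:

	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#ifndef MSG_FASTOPEN
	#define MSG_FASTOPEN 0x20000000	/* older libc headers may lack it */
	#endif

	int main(void)
	{
		const char req[] = "GET / HTTP/1.0\r\n\r\n";
		struct sockaddr_in dst = {
			.sin_family = AF_INET,
			.sin_port = htons(80),
		};
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* placeholder */

		/* No connect(): sendto() with MSG_FASTOPEN runs the handshake
		 * and, once a Fast Open cookie is cached, carries req in the
		 * SYN itself. */
		sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
		       (struct sockaddr *)&dst, sizeof(dst));

		close(fd);
		return 0;
	}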

+ 0 - 5
Documentation/networking/stmmac.txt

@@ -173,7 +173,6 @@ Where:
 For MDIO bus The we have:
 
  struct stmmac_mdio_bus_data {
-	int bus_id;
 	int (*phy_reset)(void *priv);
 	unsigned int phy_mask;
 	int *irqs;
@@ -181,7 +180,6 @@ For MDIO bus The we have:
  };
 
 Where:
- o bus_id: bus identifier;
  o phy_reset: hook to reset the phy device attached to the bus.
  o phy_mask: phy mask passed when register the MDIO bus within the driver.
  o irqs: list of IRQs, one per PHY.
@@ -230,9 +228,6 @@ there are two MAC cores: one MAC is for MDIO Bus/PHY emulation
 with fixed_link support.
 
 static struct stmmac_mdio_bus_data stmmac1_mdio_bus = {
-	.bus_id = 1,
-		|
-		|-> phy device on the bus_id 1
 	.phy_reset = phy_reset;
 		|
 		|-> function to provide the phy_reset on this board

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*

+ 0 - 1
drivers/Makefile

@@ -120,7 +120,6 @@ obj-$(CONFIG_VHOST_NET)		+= vhost/
 obj-$(CONFIG_VLYNQ)		+= vlynq/
 obj-$(CONFIG_STAGING)		+= staging/
 obj-y				+= platform/
-obj-y				+= ieee802154/
 #common clk code
 obj-y				+= clk/
 

+ 1 - 0
drivers/acpi/acpica/tbxface.c

@@ -387,6 +387,7 @@ acpi_get_table_with_size(char *signature,
 
 	return (AE_NOT_FOUND);
 }
+ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
 
 acpi_status
 acpi_get_table(char *signature,

+ 2 - 2
drivers/bcma/Kconfig

@@ -48,12 +48,12 @@ config BCMA_DRIVER_MIPS
 
 config BCMA_SFLASH
 	bool
-	depends on BCMA_DRIVER_MIPS && BROKEN
+	depends on BCMA_DRIVER_MIPS
 	default y
 
 config BCMA_NFLASH
 	bool
-	depends on BCMA_DRIVER_MIPS && BROKEN
+	depends on BCMA_DRIVER_MIPS
 	default y
 
 config BCMA_DRIVER_GMAC_CMN

+ 2 - 0
drivers/bcma/bcma_private.h

@@ -54,6 +54,7 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
 #ifdef CONFIG_BCMA_SFLASH
 /* driver_chipcommon_sflash.c */
 int bcma_sflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_sflash_dev;
 #else
 static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
@@ -65,6 +66,7 @@ static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 #ifdef CONFIG_BCMA_NFLASH
 /* driver_chipcommon_nflash.c */
 int bcma_nflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_nflash_dev;
 #else
 static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
 {

+ 25 - 3
drivers/bcma/driver_chipcommon_nflash.c

@@ -5,15 +5,37 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+struct platform_device bcma_nflash_dev = {
+	.name		= "bcma_nflash",
+	.num_resources	= 0,
+};
+
 /* Initialize NAND flash access */
 int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
-	bcma_err(cc->core->bus, "NAND flash support is broken\n");
+	struct bcma_bus *bus = cc->core->bus;
+
+	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
+	    cc->core->id.rev != 0x38) {
+		bcma_err(bus, "NAND flash on unsupported board!\n");
+		return -ENOTSUPP;
+	}
+
+	if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
+		bcma_err(bus, "NAND flash not present according to ChipCommon\n");
+		return -ENODEV;
+	}
+
+	cc->nflash.present = true;
+
+	/* Prepare platform device, but don't register it yet. It's too early,
+	 * malloc (required by device_private_init) is not available yet. */
+	bcma_nflash_dev.dev.platform_data = &cc->nflash;
+
 	return 0;
 }

+ 120 - 3
drivers/bcma/driver_chipcommon_sflash.c

@@ -5,15 +5,132 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+static struct resource bcma_sflash_resource = {
+	.name	= "bcma_sflash",
+	.start	= BCMA_SFLASH,
+	.end	= 0,
+	.flags  = IORESOURCE_MEM | IORESOURCE_READONLY,
+};
+
+struct platform_device bcma_sflash_dev = {
+	.name		= "bcma_sflash",
+	.resource	= &bcma_sflash_resource,
+	.num_resources	= 1,
+};
+
+struct bcma_sflash_tbl_e {
+	char *name;
+	u32 id;
+	u32 blocksize;
+	u16 numblocks;
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
+	{ "", 0x14, 0x10000, 32, },
+	{ 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+	{ 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+	{ 0 },
+};
+
+static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
+{
+	int i;
+	bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
+			BCMA_CC_FLASHCTL_START | opcode);
+	for (i = 0; i < 1000; i++) {
+		if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
+		      BCMA_CC_FLASHCTL_BUSY))
+			return;
+		cpu_relax();
+	}
+	bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
+}
+
 /* Initialize serial flash access */
 int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
-	bcma_err(cc->core->bus, "Serial flash support is broken\n");
+	struct bcma_bus *bus = cc->core->bus;
+	struct bcma_sflash *sflash = &cc->sflash;
+	struct bcma_sflash_tbl_e *e;
+	u32 id, id2;
+
+	switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+	case BCMA_CC_FLASHT_STSER:
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
+
+		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+		id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+		switch (id) {
+		case 0xbf:
+			for (e = bcma_sflash_sst_tbl; e->name; e++) {
+				if (e->id == id2)
+					break;
+			}
+			break;
+		default:
+			for (e = bcma_sflash_st_tbl; e->name; e++) {
+				if (e->id == id)
+					break;
+			}
+			break;
+		}
+		if (!e->name) {
+			bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
+			return -ENOTSUPP;
+		}
+
+		break;
+	case BCMA_CC_FLASHT_ATSER:
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
+		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
+
+		for (e = bcma_sflash_at_tbl; e->name; e++) {
+			if (e->id == id)
+				break;
+		}
+		if (!e->name) {
+			bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
+			return -ENOTSUPP;
+		}
+
+		break;
+	default:
+		bcma_err(bus, "Unsupported flash type\n");
+		return -ENOTSUPP;
+	}
+
+	sflash->window = BCMA_SFLASH;
+	sflash->blocksize = e->blocksize;
+	sflash->numblocks = e->numblocks;
+	sflash->size = sflash->blocksize * sflash->numblocks;
+	sflash->present = true;
+
+	bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+		  e->name, sflash->size / 1024, sflash->blocksize,
+		  sflash->numblocks);
+
+	/* Prepare platform device, but don't register it yet. It's too early,
+	 * malloc (required by device_private_init) is not available yet. */
+	bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
+					  sflash->size;
+	bcma_sflash_dev.dev.platform_data = sflash;
+
 	return 0;
 }

+ 17 - 0
drivers/bcma/main.c

@@ -7,6 +7,7 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 
@@ -136,6 +137,22 @@ static int bcma_register_cores(struct bcma_bus *bus)
 		dev_id++;
 	}
 
+#ifdef CONFIG_BCMA_SFLASH
+	if (bus->drv_cc.sflash.present) {
+		err = platform_device_register(&bcma_sflash_dev);
+		if (err)
+			bcma_err(bus, "Error registering serial flash\n");
+	}
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+	if (bus->drv_cc.nflash.present) {
+		err = platform_device_register(&bcma_nflash_dev);
+		if (err)
+			bcma_err(bus, "Error registering NAND flash\n");
+	}
+#endif
+
 	return 0;
 }
 

+ 1 - 0
drivers/char/agp/intel-agp.h

@@ -64,6 +64,7 @@
 #define I830_PTE_SYSTEM_CACHED  0x00000006
 /* GT PTE cache control fields */
 #define GEN6_PTE_UNCACHED	0x00000002
+#define HSW_PTE_UNCACHED	0x00000000
 #define GEN6_PTE_LLC		0x00000004
 #define GEN6_PTE_LLC_MLC	0x00000006
 #define GEN6_PTE_GFDT		0x00000008

+ 69 - 36
drivers/char/agp/intel-gtt.c

@@ -1156,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags)
 	return true;
 }
 
+static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
+				unsigned int flags)
+{
+	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+	u32 pte_flags;
+
+	if (type_mask == AGP_USER_MEMORY)
+		pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
+	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+		if (gfdt)
+			pte_flags |= GEN6_PTE_GFDT;
+	} else { /* set 'normal'/'cached' to LLC by default */
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+		if (gfdt)
+			pte_flags |= GEN6_PTE_GFDT;
+	}
+
+	/* gen6 has bit11-4 for physical addr bit39-32 */
+	addr |= (addr >> 28) & 0xff0;
+	writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
 static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
 			     unsigned int flags)
 {
@@ -1382,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
 	.check_flags = gen6_check_flags,
 	.chipset_flush = i9xx_chipset_flush,
 };
+static const struct intel_gtt_driver haswell_gtt_driver = {
+	.gen = 6,
+	.setup = i9xx_setup,
+	.cleanup = gen6_cleanup,
+	.write_entry = haswell_write_entry,
+	.dma_mask_size = 40,
+	.check_flags = gen6_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
 static const struct intel_gtt_driver valleyview_gtt_driver = {
 	.gen = 7,
 	.setup = i9xx_setup,
@@ -1499,77 +1532,77 @@ static const struct intel_gtt_driver_description {
 	{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
 	    "ValleyView", &valleyview_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
-	    "Haswell", &sandybridge_gtt_driver },
+	    "Haswell", &haswell_gtt_driver },
 	{ 0, NULL, NULL }
 };
 

+ 0 - 3
drivers/gpu/drm/drm_modes.c

@@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
 	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
 	p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
-	p->crtc_hadjusted = false;
-	p->crtc_vadjusted = false;
 }
 EXPORT_SYMBOL(drm_mode_set_crtcinfo);
 

+ 2 - 2
drivers/gpu/drm/drm_proc.c

@@ -89,7 +89,7 @@ static const struct file_operations drm_proc_fops = {
  * Create a given set of proc files represented by an array of
  * gdm_proc_lists in the given root directory.
  */
-int drm_proc_create_files(struct drm_info_list *files, int count,
+static int drm_proc_create_files(struct drm_info_list *files, int count,
 			  struct proc_dir_entry *root, struct drm_minor *minor)
 {
 	struct drm_device *dev = minor->dev;
@@ -172,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
 	return 0;
 }
 
-int drm_proc_remove_files(struct drm_info_list *files, int count,
+static int drm_proc_remove_files(struct drm_info_list *files, int count,
 			  struct drm_minor *minor)
 {
 	struct list_head *pos, *q;

+ 4 - 4
drivers/gpu/drm/i915/i915_gem.c

@@ -2365,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
+		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+		if (ret)
+			return ret;
+
 		ret = i915_ring_idle(ring);
 		if (ret)
 			return ret;
@@ -2372,10 +2376,6 @@ int i915_gpu_idle(struct drm_device *dev)
 		/* Is the device fubar? */
 		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
 			return -EBUSY;
-
-		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
-		if (ret)
-			return ret;
 	}
 
 	return 0;

+ 4 - 1
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 		pte_flags |= GEN6_PTE_CACHE_LLC;
 		break;
 	case I915_CACHE_NONE:
-		pte_flags |= GEN6_PTE_UNCACHED;
+		if (IS_HASWELL(dev))
+			pte_flags |= HSW_PTE_UNCACHED;
+		else
+			pte_flags |= GEN6_PTE_UNCACHED;
 		break;
 	default:
 		BUG();

+ 1 - 0
drivers/gpu/drm/i915/i915_reg.h

@@ -115,6 +115,7 @@
 
 #define GEN6_PTE_VALID			(1 << 0)
 #define GEN6_PTE_UNCACHED		(1 << 1)
+#define HSW_PTE_UNCACHED		(0)
 #define GEN6_PTE_CACHE_LLC		(2 << 1)
 #define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
 #define GEN6_PTE_CACHE_BITS		(3 << 1)

+ 33 - 3
drivers/gpu/drm/i915/intel_crt.c

@@ -326,6 +326,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 	return ret;
 }
 
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+				struct i2c_adapter *i2c)
+{
+	struct edid *edid;
+
+	edid = drm_get_edid(connector, i2c);
+
+	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+		DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+		intel_gmbus_force_bit(i2c, true);
+		edid = drm_get_edid(connector, i2c);
+		intel_gmbus_force_bit(i2c, false);
+	}
+
+	return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+				struct i2c_adapter *adapter)
+{
+	struct edid *edid;
+
+	edid = intel_crt_get_edid(connector, adapter);
+	if (!edid)
+		return 0;
+
+	return intel_connector_update_modes(connector, edid);
+}
+
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
 	struct intel_crt *crt = intel_attached_crt(connector);
@@ -336,7 +366,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
 	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
-	edid = drm_get_edid(connector, i2c);
+	edid = intel_crt_get_edid(connector, i2c);
 
 	if (edid) {
 		bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -544,13 +574,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
 	struct i2c_adapter *i2c;
 
 	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
-	ret = intel_ddc_get_modes(connector, i2c);
+	ret = intel_crt_ddc_get_modes(connector, i2c);
 	if (ret || !IS_G4X(dev))
 		return ret;
 
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
 	i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
-	return intel_ddc_get_modes(connector, i2c);
+	return intel_crt_ddc_get_modes(connector, i2c);
 }
 
 static int intel_crt_set_property(struct drm_connector *connector,

+ 2 - 0
drivers/gpu/drm/i915/intel_drv.h

@@ -342,6 +342,8 @@ struct intel_fbc_work {
 	int interval;
 };
 
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 
 extern void intel_attach_force_audio_property(struct drm_connector *connector);

+ 22 - 9
drivers/gpu/drm/i915/intel_modes.c

@@ -32,6 +32,25 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+/**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid)
+{
+	int ret;
+
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	drm_edid_to_eld(connector, edid);
+	connector->display_info.raw_edid = NULL;
+	kfree(edid);
+
+	return ret;
+}
+
 /**
  * intel_ddc_get_modes - get modelist from monitor
  * @connector: DRM connector device to use
@@ -43,18 +62,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,
 			struct i2c_adapter *adapter)
 {
 	struct edid *edid;
-	int ret = 0;
 
 	edid = drm_get_edid(connector, adapter);
-	if (edid) {
-		drm_mode_connector_update_edid_property(connector, edid);
-		ret = drm_add_edid_modes(connector, edid);
-		drm_edid_to_eld(connector, edid);
-		connector->display_info.raw_edid = NULL;
-		kfree(edid);
-	}
+	if (!edid)
+		return 0;
 
-	return ret;
+	return intel_connector_update_modes(connector, edid);
 }
 
 static const struct drm_prop_enum_list force_audio_names[] = {

+ 4 - 11
drivers/gpu/drm/i915/intel_pm.c

@@ -2441,17 +2441,10 @@ static void gen6_enable_rps(struct drm_device *dev)
 		   dev_priv->max_delay << 24 |
 		   dev_priv->min_delay << 16);
 
-	if (IS_HASWELL(dev)) {
-		I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-		I915_WRITE(GEN6_RP_UP_EI, 66000);
-		I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-	} else {
-		I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
-		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
-		I915_WRITE(GEN6_RP_UP_EI, 100000);
-		I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
-	}
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 	I915_WRITE(GEN6_RP_CONTROL,

+ 1 - 0
drivers/gpu/drm/i915/intel_sdvo.c

@@ -1692,6 +1692,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
 	edid = intel_sdvo_get_edid(connector);
 	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
 		has_audio = drm_detect_monitor_audio(edid);
+	kfree(edid);
 
 	return has_audio;
 }

+ 21 - 4
drivers/gpu/drm/radeon/atombios_crtc.c

@@ -444,11 +444,28 @@ union atom_enable_ss {
 static void atombios_crtc_program_ss(struct radeon_device *rdev,
 				     int enable,
 				     int pll_id,
+				     int crtc_id,
 				     struct radeon_atom_ss *ss)
 {
+	unsigned i;
 	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
 	union atom_enable_ss args;
 
+	if (!enable) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->mode_info.crtcs[i] &&
+			    rdev->mode_info.crtcs[i]->enabled &&
+			    i != crtc_id &&
+			    pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+				/* one other crtc is using this pll don't turn
+				 * off spread spectrum as it might turn off
+				 * display on active crtc
+				 */
+				return;
+			}
+		}
+	}
+
 	memset(&args, 0, sizeof(args));
 
 	if (ASIC_IS_DCE5(rdev)) {
@@ -1028,7 +1045,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
 					  &ref_div, &post_div);
 
-	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
+	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
 
 	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
 				  encoder_mode, radeon_encoder->encoder_id, mode->clock,
@@ -1051,7 +1068,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 			ss.step = step_size;
 		}
 
-		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
 	}
 }
 
@@ -1572,11 +1589,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
 								   ASIC_INTERNAL_SS_ON_DCPLL,
 								   rdev->clock.default_dispclk);
 		if (ss_enabled)
-			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss);
+			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
 		/* XXX: DCE5, make sure voltage, dispclk is high enough */
 		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
 		if (ss_enabled)
-			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss);
+			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
 	}
 
 }

+ 88 - 17
drivers/gpu/drm/radeon/r600_cs.c

@@ -47,13 +47,17 @@ struct r600_cs_track {
 	u32			npipes;
 	/* value we track */
 	u32			sq_config;
+	u32			log_nsamples;
 	u32			nsamples;
 	u32			cb_color_base_last[8];
 	struct radeon_bo	*cb_color_bo[8];
 	u64			cb_color_bo_mc[8];
-	u32			cb_color_bo_offset[8];
-	struct radeon_bo	*cb_color_frag_bo[8]; /* unused */
-	struct radeon_bo	*cb_color_tile_bo[8]; /* unused */
+	u64			cb_color_bo_offset[8];
+	struct radeon_bo	*cb_color_frag_bo[8];
+	u64			cb_color_frag_offset[8];
+	struct radeon_bo	*cb_color_tile_bo[8];
+	u64			cb_color_tile_offset[8];
+	u32			cb_color_mask[8];
 	u32			cb_color_info[8];
 	u32			cb_color_view[8];
 	u32			cb_color_size_idx[8]; /* unused */
@@ -349,10 +353,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 	unsigned array_mode;
 	u32 format;
 
-	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
-		dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
-		return -EINVAL;
-	}
 	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
 	format = G_0280A0_FORMAT(track->cb_color_info[i]);
 	if (!r600_fmt_is_valid_color(format)) {
@@ -420,7 +420,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 	}
 
 	/* check offset */
-	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
+	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
+	      r600_fmt_get_blocksize(format) * track->nsamples;
 	switch (array_mode) {
 	default:
 	case V_0280A0_ARRAY_LINEAR_GENERAL:
@@ -441,7 +442,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 			 * broken userspace.
 			 */
 		} else {
-			dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
+			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
 				 __func__, i, array_mode,
 				 track->cb_color_bo_offset[i], tmp,
 				 radeon_bo_size(track->cb_color_bo[i]),
@@ -458,6 +459,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
 		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
 	ib[track->cb_color_size_idx[i]] = tmp;
+
+	/* FMASK/CMASK */
+	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+	case V_0280A0_TILE_DISABLE:
+		break;
+	case V_0280A0_FRAG_ENABLE:
+		if (track->nsamples > 1) {
+			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
+			/* the tile size is 8x8, but the size is in units of bits.
+			 * for bytes, do just * 8. */
+			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
+
+			if (bytes + track->cb_color_frag_offset[i] >
+			    radeon_bo_size(track->cb_color_frag_bo[i])) {
+				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
+					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+					 __func__, tile_max, bytes,
+					 track->cb_color_frag_offset[i],
+					 radeon_bo_size(track->cb_color_frag_bo[i]));
+				return -EINVAL;
+			}
+		}
+		/* fall through */
+	case V_0280A0_CLEAR_ENABLE:
+	{
+		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
+		/* One block = 128x128 pixels, one 8x8 tile has 4 bits..
+		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
+		uint32_t bytes = (block_max + 1) * 128;
+
+		if (bytes + track->cb_color_tile_offset[i] >
+		    radeon_bo_size(track->cb_color_tile_bo[i])) {
+			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
+				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+				 __func__, block_max, bytes,
+				 track->cb_color_tile_offset[i],
+				 radeon_bo_size(track->cb_color_tile_bo[i]));
+			return -EINVAL;
+		}
+		break;
+	}
+	default:
+		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -566,7 +612,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 
 		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
 		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
-		tmp = ntiles * bpe * 64 * nviews;
+		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
 		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
 			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
 					array_mode,
@@ -1231,6 +1277,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		break;
 	case R_028C04_PA_SC_AA_CONFIG:
 		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+		track->log_nsamples = tmp;
 		track->nsamples = 1 << tmp;
 		track->cb_dirty = true;
 		break;
@@ -1312,16 +1359,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
 				return -EINVAL;
 			}
-			ib[idx] = track->cb_color_base_last[tmp];
 			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
 		} else {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
 				return -EINVAL;
 			}
-			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 			track->cb_color_frag_bo[tmp] = reloc->robj;
+			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
 		}
 		break;
 	case R_0280C0_CB_COLOR0_TILE:
@@ -1338,16 +1390,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
 				return -EINVAL;
 			}
-			ib[idx] = track->cb_color_base_last[tmp];
 			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
 		} else {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
 				return -EINVAL;
 			}
-			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 			track->cb_color_tile_bo[tmp] = reloc->robj;
+			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case R_028100_CB_COLOR0_MASK:
+	case R_028104_CB_COLOR1_MASK:
+	case R_028108_CB_COLOR2_MASK:
+	case R_02810C_CB_COLOR3_MASK:
+	case R_028110_CB_COLOR4_MASK:
+	case R_028114_CB_COLOR5_MASK:
+	case R_028118_CB_COLOR6_MASK:
+	case R_02811C_CB_COLOR7_MASK:
+		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
+		track->cb_color_mask[tmp] = ib[idx];
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
 		}
 		break;
 	case CB_COLOR0_BASE:
@@ -1492,7 +1563,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level)
 }
 
 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
-			      unsigned w0, unsigned h0, unsigned d0, unsigned format,
+			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
 			      unsigned block_align, unsigned height_align, unsigned base_align,
 			      unsigned *l0_size, unsigned *mipmap_size)
 {
@@ -1520,7 +1591,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
 
 		depth = r600_mip_minify(d0, i);
 
-		size = nbx * nby * blocksize;
+		size = nbx * nby * blocksize * nsamples;
 		if (nfaces)
 			size *= nfaces;
 		else
@@ -1672,7 +1743,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
 
 		nfaces = larray - barray + 1;
 	}
-	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
+	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
 			  pitch_align, height_align, base_align,
 			  &l0_size, &mipmap_size);
 	/* using get ib will give us the offset into the texture bo */

+ 17 - 0
drivers/gpu/drm/radeon/r600d.h

@@ -92,6 +92,20 @@
 #define R_028094_CB_COLOR5_VIEW                      0x028094
 #define R_028098_CB_COLOR6_VIEW                      0x028098
 #define R_02809C_CB_COLOR7_VIEW                      0x02809C
+#define R_028100_CB_COLOR0_MASK                      0x028100
+#define   S_028100_CMASK_BLOCK_MAX(x)                  (((x) & 0xFFF) << 0)
+#define   G_028100_CMASK_BLOCK_MAX(x)                  (((x) >> 0) & 0xFFF)
+#define   C_028100_CMASK_BLOCK_MAX                     0xFFFFF000
+#define   S_028100_FMASK_TILE_MAX(x)                   (((x) & 0xFFFFF) << 12)
+#define   G_028100_FMASK_TILE_MAX(x)                   (((x) >> 12) & 0xFFFFF)
+#define   C_028100_FMASK_TILE_MAX                      0x00000FFF
+#define R_028104_CB_COLOR1_MASK                      0x028104
+#define R_028108_CB_COLOR2_MASK                      0x028108
+#define R_02810C_CB_COLOR3_MASK                      0x02810C
+#define R_028110_CB_COLOR4_MASK                      0x028110
+#define R_028114_CB_COLOR5_MASK                      0x028114
+#define R_028118_CB_COLOR6_MASK                      0x028118
+#define R_02811C_CB_COLOR7_MASK                      0x02811C
 #define CB_COLOR0_INFO                                  0x280a0
 #	define CB_FORMAT(x)				((x) << 2)
 #       define CB_ARRAY_MODE(x)                         ((x) << 8)
@@ -1400,6 +1414,9 @@
 #define   S_0280A0_TILE_MODE(x)                        (((x) & 0x3) << 18)
 #define   G_0280A0_TILE_MODE(x)                        (((x) >> 18) & 0x3)
 #define   C_0280A0_TILE_MODE                           0xFFF3FFFF
+#define     V_0280A0_TILE_DISABLE			0
+#define     V_0280A0_CLEAR_ENABLE			1
+#define     V_0280A0_FRAG_ENABLE			2
 #define   S_0280A0_BLEND_CLAMP(x)                      (((x) & 0x1) << 20)
 #define   G_0280A0_BLEND_CLAMP(x)                      (((x) >> 20) & 0x1)
 #define   C_0280A0_BLEND_CLAMP                         0xFFEFFFFF

+ 0 - 15
drivers/gpu/drm/radeon/radeon.h

@@ -142,21 +142,6 @@ struct radeon_device;
 /*
 /*
  * BIOS.
  * BIOS.
  */
  */
-#define ATRM_BIOS_PAGE 4096
-
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_atrm_supported(struct pci_dev *pdev);
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
-#else
-static inline bool radeon_atrm_supported(struct pci_dev *pdev)
-{
-	return false;
-}
-
-static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
-	return -EINVAL;
-}
-#endif
 bool radeon_get_bios(struct radeon_device *rdev);
 bool radeon_get_bios(struct radeon_device *rdev);
 
 
 /*
 /*

+ 1 - 1
drivers/gpu/drm/radeon/radeon_atombios.c

@@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 	}
 	}
 
 
 	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
 	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
-	if ((dev->pdev->device == 0x9802) &&
+	if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
 	    (dev->pdev->subsystem_vendor == 0x1734) &&
 	    (dev->pdev->subsystem_vendor == 0x1734) &&
 	    (dev->pdev->subsystem_device == 0x11bd)) {
 	    (dev->pdev->subsystem_device == 0x11bd)) {
 		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
 		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {

+ 1 - 55
drivers/gpu/drm/radeon/radeon_atpx_handler.c

@@ -30,57 +30,8 @@ static struct radeon_atpx_priv {
 	/* handle for device - and atpx */
 	/* handle for device - and atpx */
 	acpi_handle dhandle;
 	acpi_handle dhandle;
 	acpi_handle atpx_handle;
 	acpi_handle atpx_handle;
-	acpi_handle atrm_handle;
 } radeon_atpx_priv;
 } radeon_atpx_priv;
 
 
-/* retrieve the ROM in 4k blocks */
-static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
-			    int offset, int len)
-{
-	acpi_status status;
-	union acpi_object atrm_arg_elements[2], *obj;
-	struct acpi_object_list atrm_arg;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
-	atrm_arg.count = 2;
-	atrm_arg.pointer = &atrm_arg_elements[0];
-
-	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
-	atrm_arg_elements[0].integer.value = offset;
-
-	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
-	atrm_arg_elements[1].integer.value = len;
-
-	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
-	if (ACPI_FAILURE(status)) {
-		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
-		return -ENODEV;
-	}
-
-	obj = (union acpi_object *)buffer.pointer;
-	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
-	len = obj->buffer.length;
-	kfree(buffer.pointer);
-	return len;
-}
-
-bool radeon_atrm_supported(struct pci_dev *pdev)
-{
-	/* get the discrete ROM only via ATRM */
-	if (!radeon_atpx_priv.atpx_detected)
-		return false;
-
-	if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
-		return false;
-	return true;
-}
-
-
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
-	return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
-}
-
 static int radeon_atpx_get_version(acpi_handle handle)
 static int radeon_atpx_get_version(acpi_handle handle)
 {
 {
 	acpi_status status;
 	acpi_status status;
@@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 
 
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 {
 {
-	acpi_handle dhandle, atpx_handle, atrm_handle;
+	acpi_handle dhandle, atpx_handle;
 	acpi_status status;
 	acpi_status status;
 
 
 	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
 	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 	if (ACPI_FAILURE(status))
 	if (ACPI_FAILURE(status))
 		return false;
 		return false;
 
 
-	status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
-	if (ACPI_FAILURE(status))
-		return false;
-
 	radeon_atpx_priv.dhandle = dhandle;
 	radeon_atpx_priv.dhandle = dhandle;
 	radeon_atpx_priv.atpx_handle = atpx_handle;
 	radeon_atpx_priv.atpx_handle = atpx_handle;
-	radeon_atpx_priv.atrm_handle = atrm_handle;
 	return true;
 	return true;
 }
 }
 
 

+ 134 - 4
drivers/gpu/drm/radeon/radeon_bios.c

@@ -32,6 +32,7 @@
 
 
 #include <linux/vga_switcheroo.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
+#include <linux/acpi.h>
 /*
 /*
  * BIOS.
  * BIOS.
  */
  */
@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
 	return true;
 	return true;
 }
 }
 
 
+#ifdef CONFIG_ACPI
 /* ATRM is used to get the BIOS on the discrete cards in
 /* ATRM is used to get the BIOS on the discrete cards in
  * dual-gpu systems.
  * dual-gpu systems.
  */
  */
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * radeon_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+			    int offset, int len)
+{
+	acpi_status status;
+	union acpi_object atrm_arg_elements[2], *obj;
+	struct acpi_object_list atrm_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+	atrm_arg.count = 2;
+	atrm_arg.pointer = &atrm_arg_elements[0];
+
+	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[0].integer.value = offset;
+
+	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[1].integer.value = len;
+
+	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+	if (ACPI_FAILURE(status)) {
+		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+		return -ENODEV;
+	}
+
+	obj = (union acpi_object *)buffer.pointer;
+	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+	len = obj->buffer.length;
+	kfree(buffer.pointer);
+	return len;
+}
+
 static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 {
 {
 	int ret;
 	int ret;
 	int size = 256 * 1024;
 	int size = 256 * 1024;
 	int i;
 	int i;
+	struct pci_dev *pdev = NULL;
+	acpi_handle dhandle, atrm_handle;
+	acpi_status status;
+	bool found = false;
+
+	/* ATRM is for the discrete card only */
+	if (rdev->flags & RADEON_IS_IGP)
+		return false;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+		if (!dhandle)
+			continue;
+
+		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+		if (!ACPI_FAILURE(status)) {
+			found = true;
+			break;
+		}
+	}
 
 
-	if (!radeon_atrm_supported(rdev->pdev))
+	if (!found)
 		return false;
 		return false;
 
 
 	rdev->bios = kmalloc(size, GFP_KERNEL);
 	rdev->bios = kmalloc(size, GFP_KERNEL);
@@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 	}
 	}
 
 
 	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
 	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
-		ret = radeon_atrm_get_bios_chunk(rdev->bios,
-						 (i * ATRM_BIOS_PAGE),
-						 ATRM_BIOS_PAGE);
+		ret = radeon_atrm_call(atrm_handle,
+				       rdev->bios,
+				       (i * ATRM_BIOS_PAGE),
+				       ATRM_BIOS_PAGE);
 		if (ret < ATRM_BIOS_PAGE)
 		if (ret < ATRM_BIOS_PAGE)
 			break;
 			break;
 	}
 	}
@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 	}
 	}
 	return true;
 	return true;
 }
 }
+#else
+static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
 
 
 static bool ni_read_disabled_bios(struct radeon_device *rdev)
 static bool ni_read_disabled_bios(struct radeon_device *rdev)
 {
 {
@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
 		return legacy_read_disabled_bios(rdev);
 		return legacy_read_disabled_bios(rdev);
 }
 }
 
 
+#ifdef CONFIG_ACPI
+static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	bool ret = false;
+	struct acpi_table_header *hdr;
+	acpi_size tbl_size;
+	UEFI_ACPI_VFCT *vfct;
+	GOP_VBIOS_CONTENT *vbios;
+	VFCT_IMAGE_HEADER *vhdr;
+
+	if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+		return false;
+	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+		goto out_unmap;
+	}
+
+	vfct = (UEFI_ACPI_VFCT *)hdr;
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+		goto out_unmap;
+	}
+
+	vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+	vhdr = &vbios->VbiosHeader;
+	DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+			vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+			vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+
+	if (vhdr->PCIBus != rdev->pdev->bus->number ||
+	    vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
+	    vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
+	    vhdr->VendorID != rdev->pdev->vendor ||
+	    vhdr->DeviceID != rdev->pdev->device) {
+		DRM_INFO("ACPI VFCT table is not for this card\n");
+		goto out_unmap;
+	};
+
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+		DRM_ERROR("ACPI VFCT image truncated\n");
+		goto out_unmap;
+	}
+
+	rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+	ret = !!rdev->bios;
+
+out_unmap:
+	return ret;
+}
+#else
+static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
 
 
 bool radeon_get_bios(struct radeon_device *rdev)
 bool radeon_get_bios(struct radeon_device *rdev)
 {
 {
@@ -483,6 +611,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
 	uint16_t tmp;
 	uint16_t tmp;
 
 
 	r = radeon_atrm_get_bios(rdev);
 	r = radeon_atrm_get_bios(rdev);
+	if (r == false)
+		r = radeon_acpi_vfct_bios(rdev);
 	if (r == false)
 	if (r == false)
 		r = igp_read_bios_from_vram(rdev);
 		r = igp_read_bios_from_vram(rdev);
 	if (r == false)
 	if (r == false)

+ 2 - 1
drivers/gpu/drm/radeon/radeon_drv.c

@@ -62,9 +62,10 @@
  *   2.18.0 - r600-eg: allow "invalid" DB formats
  *   2.18.0 - r600-eg: allow "invalid" DB formats
  *   2.19.0 - r600-eg: MSAA textures
  *   2.19.0 - r600-eg: MSAA textures
  *   2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
  *   2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
+ *   2.21.0 - r600-r700: FMASK and CMASK
  */
  */
 #define KMS_DRIVER_MAJOR	2
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	20
+#define KMS_DRIVER_MINOR	21
 #define KMS_DRIVER_PATCHLEVEL	0
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
 int radeon_driver_unload_kms(struct drm_device *dev);

+ 1 - 2
drivers/gpu/drm/radeon/radeon_object.c

@@ -132,6 +132,7 @@ int radeon_bo_create(struct radeon_device *rdev,
 	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
 	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
 				       sizeof(struct radeon_bo));
 				       sizeof(struct radeon_bo));
 
 
+retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 	if (bo == NULL)
 		return -ENOMEM;
 		return -ENOMEM;
@@ -145,8 +146,6 @@ int radeon_bo_create(struct radeon_device *rdev,
 	bo->surface_reg = -1;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->va);
 	INIT_LIST_HEAD(&bo->va);
-
-retry:
 	radeon_ttm_placement_from_domain(bo, domain);
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	/* Kernel allocation are uninterruptible */
 	down_read(&rdev->pm.mclk_lock);
 	down_read(&rdev->pm.mclk_lock);

+ 1 - 0
drivers/gpu/drm/radeon/radeon_ring.c

@@ -706,6 +706,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
 	if (radeon_debugfs_ring_init(rdev, ring)) {
 	if (radeon_debugfs_ring_init(rdev, ring)) {
 		DRM_ERROR("Failed to register debugfs file for rings !\n");
 		DRM_ERROR("Failed to register debugfs file for rings !\n");
 	}
 	}
+	radeon_ring_lockup_update(ring);
 	return 0;
 	return 0;
 }
 }
 
 

+ 0 - 8
drivers/gpu/drm/radeon/reg_srcs/r600

@@ -744,14 +744,6 @@ r600 0x9400
 0x00028C38 CB_CLRCMP_DST
 0x00028C38 CB_CLRCMP_DST
 0x00028C3C CB_CLRCMP_MSK
 0x00028C3C CB_CLRCMP_MSK
 0x00028C34 CB_CLRCMP_SRC
 0x00028C34 CB_CLRCMP_SRC
-0x00028100 CB_COLOR0_MASK
-0x00028104 CB_COLOR1_MASK
-0x00028108 CB_COLOR2_MASK
-0x0002810C CB_COLOR3_MASK
-0x00028110 CB_COLOR4_MASK
-0x00028114 CB_COLOR5_MASK
-0x00028118 CB_COLOR6_MASK
-0x0002811C CB_COLOR7_MASK
 0x00028808 CB_COLOR_CONTROL
 0x00028808 CB_COLOR_CONTROL
 0x0002842C CB_FOG_BLUE
 0x0002842C CB_FOG_BLUE
 0x00028428 CB_FOG_GREEN
 0x00028428 CB_FOG_GREEN

+ 1 - 2
drivers/gpu/drm/udl/udl_modeset.c

@@ -354,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
 
 
 static void udl_crtc_disable(struct drm_crtc *crtc)
 static void udl_crtc_disable(struct drm_crtc *crtc)
 {
 {
-
-
+	udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 }
 }
 
 
 static void udl_crtc_destroy(struct drm_crtc *crtc)
 static void udl_crtc_destroy(struct drm_crtc *crtc)

+ 5 - 1
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c

@@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 	struct drm_framebuffer *old_fb = crtc->fb;
 	struct drm_framebuffer *old_fb = crtc->fb;
 	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
 	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
-	struct drm_file *file_priv = event->base.file_priv;
+	struct drm_file *file_priv ;
 	struct vmw_fence_obj *fence = NULL;
 	struct vmw_fence_obj *fence = NULL;
 	struct drm_clip_rect clips;
 	struct drm_clip_rect clips;
 	int ret;
 	int ret;
 
 
+	if (event == NULL)
+		return -EINVAL;
+
 	/* require ScreenObject support for page flipping */
 	/* require ScreenObject support for page flipping */
 	if (!dev_priv->sou_priv)
 	if (!dev_priv->sou_priv)
 		return -ENOSYS;
 		return -ENOSYS;
 
 
+	file_priv = event->base.file_priv;
 	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
 	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
 		return -EINVAL;
 		return -EINVAL;
 
 

+ 1 - 0
drivers/i2c/busses/i2c-diolan-u2c.c

@@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 			}
 			}
 		}
 		}
 	}
 	}
+	ret = num;
 abort:
 abort:
 	sret = diolan_i2c_stop(dev);
 	sret = diolan_i2c_stop(dev);
 	if (sret < 0 && ret >= 0)
 	if (sret < 0 && ret >= 0)

+ 18 - 10
drivers/i2c/busses/i2c-nomadik.c

@@ -350,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
 
 
 	i2c_clk = clk_get_rate(dev->clk);
 	i2c_clk = clk_get_rate(dev->clk);
 
 
-	/* fallback to std. mode if machine has not provided it */
-	if (dev->cfg.clk_freq == 0)
-		dev->cfg.clk_freq = 100000;
-
 	/*
 	/*
 	 * The spec says, in case of std. mode the divider is
 	 * The spec says, in case of std. mode the divider is
 	 * 2 whereas it is 3 for fast and fastplus mode of
 	 * 2 whereas it is 3 for fast and fastplus mode of
@@ -911,20 +907,32 @@ static const struct i2c_algorithm nmk_i2c_algo = {
 	.functionality	= nmk_i2c_functionality
 	.functionality	= nmk_i2c_functionality
 };
 };
 
 
+static struct nmk_i2c_controller u8500_i2c = {
+	/*
+	 * Slave data setup time; 250ns, 100ns, and 10ns, which
+	 * is 14, 6 and 2 respectively for a 48Mhz i2c clock.
+	 */
+	.slsu           = 0xe,
+	.tft            = 1,      /* Tx FIFO threshold */
+	.rft            = 8,      /* Rx FIFO threshold */
+	.clk_freq       = 400000, /* fast mode operation */
+	.timeout        = 200,    /* Slave response timeout(ms) */
+	.sm             = I2C_FREQ_MODE_FAST,
+};
+
 static atomic_t adapter_id = ATOMIC_INIT(0);
 static atomic_t adapter_id = ATOMIC_INIT(0);
 
 
 static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
 static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
 {
 {
 	int ret = 0;
 	int ret = 0;
-	struct nmk_i2c_controller *pdata =
-			adev->dev.platform_data;
+	struct nmk_i2c_controller *pdata = adev->dev.platform_data;
 	struct nmk_i2c_dev	*dev;
 	struct nmk_i2c_dev	*dev;
 	struct i2c_adapter *adap;
 	struct i2c_adapter *adap;
 
 
-	if (!pdata) {
-		dev_warn(&adev->dev, "no platform data\n");
-		return -ENODEV;
-	}
+	if (!pdata)
+		/* No i2c configuration found, using the default. */
+		pdata = &u8500_i2c;
+
 	dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
 	dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
 	if (!dev) {
 	if (!dev) {
 		dev_err(&adev->dev, "cannot allocate memory\n");
 		dev_err(&adev->dev, "cannot allocate memory\n");

+ 1 - 1
drivers/i2c/busses/i2c-omap.c

@@ -584,7 +584,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 
 
 	r = pm_runtime_get_sync(dev->dev);
 	r = pm_runtime_get_sync(dev->dev);
 	if (IS_ERR_VALUE(r))
 	if (IS_ERR_VALUE(r))
-		return r;
+		goto out;
 
 
 	r = omap_i2c_wait_for_bb(dev);
 	r = omap_i2c_wait_for_bb(dev);
 	if (r < 0)
 	if (r < 0)

+ 1 - 1
drivers/i2c/busses/i2c-tegra.c

@@ -712,7 +712,7 @@ static int __devexit tegra_i2c_remove(struct platform_device *pdev)
 	return 0;
 	return 0;
 }
 }
 
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int tegra_i2c_suspend(struct device *dev)
 static int tegra_i2c_suspend(struct device *dev)
 {
 {
 	struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
 	struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);

+ 2 - 2
drivers/net/Kconfig

@@ -107,8 +107,6 @@ config MII
 	  or internal device.  It is safe to say Y or M here even if your
 	  or internal device.  It is safe to say Y or M here even if your
 	  ethernet card lacks MII.
 	  ethernet card lacks MII.
 
 
-source "drivers/ieee802154/Kconfig"
-
 config IFB
 config IFB
 	tristate "Intermediate Functional Block support"
 	tristate "Intermediate Functional Block support"
 	depends on NET_CLS_ACT
 	depends on NET_CLS_ACT
@@ -290,6 +288,8 @@ source "drivers/net/wimax/Kconfig"
 
 
 source "drivers/net/wan/Kconfig"
 source "drivers/net/wan/Kconfig"
 
 
+source "drivers/net/ieee802154/Kconfig"
+
 config XEN_NETDEV_FRONTEND
 config XEN_NETDEV_FRONTEND
 	tristate "Xen network device frontend driver"
 	tristate "Xen network device frontend driver"
 	depends on XEN
 	depends on XEN

+ 1 - 0
drivers/net/Makefile

@@ -53,6 +53,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_WLAN) += wireless/
 obj-$(CONFIG_WLAN) += wireless/
 obj-$(CONFIG_WIMAX) += wimax/
 obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_IEEE802154) += ieee802154/
 
 
 obj-$(CONFIG_VMXNET3) += vmxnet3/
 obj-$(CONFIG_VMXNET3) += vmxnet3/
 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o

+ 20 - 11
drivers/net/bonding/bond_main.c

@@ -2811,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 					    arp_work.work);
 					    arp_work.work);
 	struct slave *slave, *oldcurrent;
 	struct slave *slave, *oldcurrent;
 	int do_failover = 0;
 	int do_failover = 0;
-	int delta_in_ticks;
+	int delta_in_ticks, extra_ticks;
 	int i;
 	int i;
 
 
 	read_lock(&bond->lock);
 	read_lock(&bond->lock);
 
 
 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
+	extra_ticks = delta_in_ticks / 2;
 
 
 	if (bond->slave_cnt == 0)
 	if (bond->slave_cnt == 0)
 		goto re_arm;
 		goto re_arm;
@@ -2839,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 		if (slave->link != BOND_LINK_UP) {
 		if (slave->link != BOND_LINK_UP) {
 			if (time_in_range(jiffies,
 			if (time_in_range(jiffies,
 				trans_start - delta_in_ticks,
 				trans_start - delta_in_ticks,
-				trans_start + delta_in_ticks) &&
+				trans_start + delta_in_ticks + extra_ticks) &&
 			    time_in_range(jiffies,
 			    time_in_range(jiffies,
 				slave->dev->last_rx - delta_in_ticks,
 				slave->dev->last_rx - delta_in_ticks,
-				slave->dev->last_rx + delta_in_ticks)) {
+				slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
 
 
 				slave->link  = BOND_LINK_UP;
 				slave->link  = BOND_LINK_UP;
 				bond_set_active_slave(slave);
 				bond_set_active_slave(slave);
@@ -2872,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 			 */
 			 */
 			if (!time_in_range(jiffies,
 			if (!time_in_range(jiffies,
 				trans_start - delta_in_ticks,
 				trans_start - delta_in_ticks,
-				trans_start + 2 * delta_in_ticks) ||
+				trans_start + 2 * delta_in_ticks + extra_ticks) ||
 			    !time_in_range(jiffies,
 			    !time_in_range(jiffies,
 				slave->dev->last_rx - delta_in_ticks,
 				slave->dev->last_rx - delta_in_ticks,
-				slave->dev->last_rx + 2 * delta_in_ticks)) {
+				slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
 
 
 				slave->link  = BOND_LINK_DOWN;
 				slave->link  = BOND_LINK_DOWN;
 				bond_set_backup_slave(slave);
 				bond_set_backup_slave(slave);
@@ -2933,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
 	struct slave *slave;
 	struct slave *slave;
 	int i, commit = 0;
 	int i, commit = 0;
 	unsigned long trans_start;
 	unsigned long trans_start;
+	int extra_ticks;
+
+	/* All the time comparisons below need some extra time. Otherwise, on
+	 * fast networks the ARP probe/reply may arrive within the same jiffy
+	 * as it was sent.  Then, the next time the ARP monitor is run, one
+	 * arp_interval will already have passed in the comparisons.
+	 */
+	extra_ticks = delta_in_ticks / 2;
 
 
 	bond_for_each_slave(bond, slave, i) {
 	bond_for_each_slave(bond, slave, i) {
 		slave->new_link = BOND_LINK_NOCHANGE;
 		slave->new_link = BOND_LINK_NOCHANGE;
@@ -2940,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
 		if (slave->link != BOND_LINK_UP) {
 		if (slave->link != BOND_LINK_UP) {
 			if (time_in_range(jiffies,
 			if (time_in_range(jiffies,
 				slave_last_rx(bond, slave) - delta_in_ticks,
 				slave_last_rx(bond, slave) - delta_in_ticks,
-				slave_last_rx(bond, slave) + delta_in_ticks)) {
+				slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
 
 
 				slave->new_link = BOND_LINK_UP;
 				slave->new_link = BOND_LINK_UP;
 				commit++;
 				commit++;
@@ -2956,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
 		 */
 		 */
 		if (time_in_range(jiffies,
 		if (time_in_range(jiffies,
 				  slave->jiffies - delta_in_ticks,
 				  slave->jiffies - delta_in_ticks,
-				  slave->jiffies + 2 * delta_in_ticks))
+				  slave->jiffies + 2 * delta_in_ticks + extra_ticks))
 			continue;
 			continue;
 
 
 		/*
 		/*
@@ -2976,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
 		    !bond->current_arp_slave &&
 		    !bond->current_arp_slave &&
 		    !time_in_range(jiffies,
 		    !time_in_range(jiffies,
 			slave_last_rx(bond, slave) - delta_in_ticks,
 			slave_last_rx(bond, slave) - delta_in_ticks,
-			slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+			slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
 
 
 			slave->new_link = BOND_LINK_DOWN;
 			slave->new_link = BOND_LINK_DOWN;
 			commit++;
 			commit++;
@@ -2992,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
 		if (bond_is_active_slave(slave) &&
 		if (bond_is_active_slave(slave) &&
 		    (!time_in_range(jiffies,
 		    (!time_in_range(jiffies,
 			trans_start - delta_in_ticks,
 			trans_start - delta_in_ticks,
-			trans_start + 2 * delta_in_ticks) ||
+			trans_start + 2 * delta_in_ticks + extra_ticks) ||
 		     !time_in_range(jiffies,
 		     !time_in_range(jiffies,
 			slave_last_rx(bond, slave) - delta_in_ticks,
 			slave_last_rx(bond, slave) - delta_in_ticks,
-			slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+			slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
 
 
 			slave->new_link = BOND_LINK_DOWN;
 			slave->new_link = BOND_LINK_DOWN;
 			commit++;
 			commit++;
@@ -3027,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
 			if ((!bond->curr_active_slave &&
 			if ((!bond->curr_active_slave &&
 			     time_in_range(jiffies,
 			     time_in_range(jiffies,
 					   trans_start - delta_in_ticks,
 					   trans_start - delta_in_ticks,
-					   trans_start + delta_in_ticks)) ||
+					   trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
 			    bond->curr_active_slave != slave) {
 			    bond->curr_active_slave != slave) {
 				slave->link = BOND_LINK_UP;
 				slave->link = BOND_LINK_UP;
 				if (bond->current_arp_slave) {
 				if (bond->current_arp_slave) {

+ 3 - 1
drivers/net/can/sja1000/sja1000_platform.c

@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
 	priv = netdev_priv(dev);
 	priv = netdev_priv(dev);
 
 
 	dev->irq = res_irq->start;
 	dev->irq = res_irq->start;
-	priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
+	priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+	if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+		priv->irq_flags |= IRQF_SHARED;
 	priv->reg_base = addr;
 	priv->reg_base = addr;
 	/* The CAN clock frequency is half the oscillator clock frequency */
 	/* The CAN clock frequency is half the oscillator clock frequency */
 	priv->can.clock.freq = pdata->osc_freq / 2;
 	priv->can.clock.freq = pdata->osc_freq / 2;

+ 4 - 3
drivers/net/can/softing/softing_fw.c

@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
 	const uint8_t *mem, *end, *dat;
 	const uint8_t *mem, *end, *dat;
 	uint16_t type, len;
 	uint16_t type, len;
 	uint32_t addr;
 	uint32_t addr;
-	uint8_t *buf = NULL;
+	uint8_t *buf = NULL, *new_buf;
 	int buflen = 0;
 	int buflen = 0;
 	int8_t type_end = 0;
 	int8_t type_end = 0;
 
 
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
 		if (len > buflen) {
 		if (len > buflen) {
 			/* align buflen */
 			/* align buflen */
 			buflen = (len + (1024-1)) & ~(1024-1);
 			buflen = (len + (1024-1)) & ~(1024-1);
-			buf = krealloc(buf, buflen, GFP_KERNEL);
-			if (!buf) {
+			new_buf = krealloc(buf, buflen, GFP_KERNEL);
+			if (!new_buf) {
 				ret = -ENOMEM;
 				ret = -ENOMEM;
 				goto failed;
 				goto failed;
 			}
 			}
+			buf = new_buf;
 		}
 		}
 		/* verify record data */
 		/* verify record data */
 		memcpy_fromio(buf, &dpram[addr + offset], len);
 		memcpy_fromio(buf, &dpram[addr + offset], len);

+ 0 - 3
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h

@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
 			continue;		\
 			continue;		\
 		else
 		else
 
 
-#define for_each_napi_rx_queue(bp, var) \
-	for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
-
 /* Skip OOO FP */
 /* Skip OOO FP */
 #define for_each_tx_queue(bp, var) \
 #define for_each_tx_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \

+ 4 - 0
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c

@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	 */
 	 */
 	bnx2x_setup_tc(bp->dev, bp->max_cos);
 	bnx2x_setup_tc(bp->dev, bp->max_cos);
 
 
+	/* Add all NAPI objects */
+	bnx2x_add_all_napi(bp);
 	bnx2x_napi_enable(bp);
 	bnx2x_napi_enable(bp);
 
 
 	/* set pf load just before approaching the MCP */
 	/* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
 
 		/* Disable HW interrupts, NAPI */
 		/* Disable HW interrupts, NAPI */
 		bnx2x_netif_stop(bp, 1);
 		bnx2x_netif_stop(bp, 1);
+		/* Delete all NAPI objects */
+		bnx2x_del_all_napi(bp);
 
 
 		/* Release IRQs */
 		/* Release IRQs */
 		bnx2x_free_irq(bp);
 		bnx2x_free_irq(bp);

+ 2 - 2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h

@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
 	bp->num_napi_queues = bp->num_queues;
 	bp->num_napi_queues = bp->num_queues;
 
 
 	/* Add NAPI objects */
 	/* Add NAPI objects */
-	for_each_napi_rx_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
 }
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
 {
 	int i;
 	int i;
 
 
-	for_each_napi_rx_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 }
 
 

+ 0 - 2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c

@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
  */
  */
 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
 {
 {
-	bnx2x_del_all_napi(bp);
 	bnx2x_disable_msi(bp);
 	bnx2x_disable_msi(bp);
 	BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
 	BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
 	bnx2x_set_int_mode(bp);
 	bnx2x_set_int_mode(bp);
-	bnx2x_add_all_napi(bp);
 }
 }
 
 
 /**
 /**

+ 9 - 9
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c

@@ -8427,6 +8427,8 @@ unload_error:
 
 
 	/* Disable HW interrupts, NAPI */
 	/* Disable HW interrupts, NAPI */
 	bnx2x_netif_stop(bp, 1);
 	bnx2x_netif_stop(bp, 1);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
 
 	/* Release IRQs */
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 	bnx2x_free_irq(bp);
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static void poll_bnx2x(struct net_device *dev)
 static void poll_bnx2x(struct net_device *dev)
 {
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	struct bnx2x *bp = netdev_priv(dev);
+	int i;
 
 
-	disable_irq(bp->pdev->irq);
-	bnx2x_interrupt(bp->pdev->irq, dev);
-	enable_irq(bp->pdev->irq);
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+	}
 }
 }
 #endif
 #endif
 
 
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	 */
 	 */
 	bnx2x_set_int_mode(bp);
 	bnx2x_set_int_mode(bp);
 
 
-	/* Add all NAPI objects */
-	bnx2x_add_all_napi(bp);
-
 	rc = register_netdev(dev);
 	rc = register_netdev(dev);
 	if (rc) {
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register net device\n");
 		dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 
 
 	unregister_netdev(dev);
 	unregister_netdev(dev);
 
 
-	/* Delete all NAPI objects */
-	bnx2x_del_all_napi(bp);
-
 	/* Power on: we can't let PCI layer write to us while we are in D3 */
 	/* Power on: we can't let PCI layer write to us while we are in D3 */
 	bnx2x_set_power_state(bp, PCI_D0);
 	bnx2x_set_power_state(bp, PCI_D0);
 
 
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	bnx2x_tx_disable(bp);
 	bnx2x_tx_disable(bp);
 
 
 	bnx2x_netif_stop(bp, 0);
 	bnx2x_netif_stop(bp, 0);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
 
 	del_timer_sync(&bp->timer);
 	del_timer_sync(&bp->timer);
 
 

+ 1 - 0
drivers/net/ethernet/emulex/benet/be.h

@@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
 #define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
 #define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)
 #define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)
 
 
+#define MAX_VFS			30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN		32
 #define FW_VER_LEN		32
 
 
 struct be_dma_mem {
 struct be_dma_mem {

+ 5 - 3
drivers/net/ethernet/emulex/benet/be_cmds.c

@@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 
 
 		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
 		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
 			dev_warn(&adapter->pdev->dev,
 			dev_warn(&adapter->pdev->dev,
-				 "opcode %d-%d is not permitted\n",
+				 "VF is not privileged to issue opcode %d-%d\n",
 				 opcode, subsystem);
 				 opcode, subsystem);
 		} else {
 		} else {
 			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
 	int num = 0, status = 0;
 	int num = 0, status = 0;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
 
-	spin_lock_bh(&adapter->mcc_cq_lock);
+	spin_lock(&adapter->mcc_cq_lock);
 	while ((compl = be_mcc_compl_get(adapter))) {
 	while ((compl = be_mcc_compl_get(adapter))) {
 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
 			/* Interpret flags as an async trailer */
 			/* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
 	if (num)
 	if (num)
 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 
 
-	spin_unlock_bh(&adapter->mcc_cq_lock);
+	spin_unlock(&adapter->mcc_cq_lock);
 	return status;
 	return status;
 }
 }
 
 
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
 		if (be_error(adapter))
 		if (be_error(adapter))
 			return -EIO;
 			return -EIO;
 
 
+		local_bh_disable();
 		status = be_process_mcc(adapter);
 		status = be_process_mcc(adapter);
+		local_bh_enable();
 
 
 		if (atomic_read(&mcc_obj->q.used) == 0)
 		if (atomic_read(&mcc_obj->q.used) == 0)
 			break;
 			break;

+ 14 - 10
drivers/net/ethernet/emulex/benet/be_main.c

@@ -2176,8 +2176,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
 {
 {
 	u32 num = 0;
 	u32 num = 0;
 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-	     !sriov_want(adapter) && be_physfn(adapter) &&
-	     !be_is_mc(adapter)) {
+	     !sriov_want(adapter) && be_physfn(adapter)) {
 		num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
 		num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
 		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
 		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
 	}
 	}
@@ -2646,8 +2645,8 @@ static int be_vf_setup(struct be_adapter *adapter)
 	}
 	}
 
 
 	for_all_vfs(adapter, vf_cfg, vf) {
 	for_all_vfs(adapter, vf_cfg, vf) {
-		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
-						  NULL, vf + 1);
+		lnk_speed = 1000;
+		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
 		if (status)
 		if (status)
 			goto err;
 			goto err;
 		vf_cfg->tx_rate = lnk_speed * 10;
 		vf_cfg->tx_rate = lnk_speed * 10;
@@ -2724,6 +2723,8 @@ static int be_get_config(struct be_adapter *adapter)
 	if (pos) {
 	if (pos) {
 		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
 		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
 				     &dev_num_vfs);
 				     &dev_num_vfs);
+		if (!lancer_chip(adapter))
+			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
 		adapter->dev_num_vfs = dev_num_vfs;
 		adapter->dev_num_vfs = dev_num_vfs;
 	}
 	}
 	return 0;
 	return 0;
@@ -3437,6 +3438,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
 	if (mem->va)
 	if (mem->va)
 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
 				  mem->dma);
 				  mem->dma);
+	kfree(adapter->pmac_id);
 }
 }
 
 
 static int be_ctrl_init(struct be_adapter *adapter)
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -3473,6 +3475,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
 	}
 	}
 	memset(rx_filter->va, 0, rx_filter->size);
 	memset(rx_filter->va, 0, rx_filter->size);
 
 
+	/* primary mac needs 1 pmac entry */
+	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+				   sizeof(*adapter->pmac_id), GFP_KERNEL);
+	if (!adapter->pmac_id)
+		return -ENOMEM;
+
 	mutex_init(&adapter->mbox_lock);
 	mutex_init(&adapter->mbox_lock);
 	spin_lock_init(&adapter->mcc_lock);
 	spin_lock_init(&adapter->mcc_lock);
 	spin_lock_init(&adapter->mcc_cq_lock);
 	spin_lock_init(&adapter->mcc_cq_lock);
@@ -3609,12 +3617,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
 	else
 	else
 		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
 		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
 
 
-	/* primary mac needs 1 pmac entry */
-	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
-				  sizeof(u32), GFP_KERNEL);
-	if (!adapter->pmac_id)
-		return -ENOMEM;
-
 	status = be_cmd_get_cntl_attributes(adapter);
 	status = be_cmd_get_cntl_attributes(adapter);
 	if (status)
 	if (status)
 		return status;
 		return status;
@@ -3763,7 +3765,9 @@ static void be_worker(struct work_struct *work)
 	/* when interrupts are not yet enabled, just reap any pending
 	/* when interrupts are not yet enabled, just reap any pending
 	* mcc completions */
 	* mcc completions */
 	if (!netif_running(adapter->netdev)) {
 	if (!netif_running(adapter->netdev)) {
+		local_bh_disable();
 		be_process_mcc(adapter);
 		be_process_mcc(adapter);
+		local_bh_enable();
 		goto reschedule;
 		goto reschedule;
 	}
 	}
 
 

+ 7 - 0
drivers/net/ethernet/freescale/Kconfig

@@ -62,6 +62,13 @@ config FSL_PQ_MDIO
 	---help---
 	---help---
 	  This driver supports the MDIO bus used by the gianfar and UCC drivers.
 	  This driver supports the MDIO bus used by the gianfar and UCC drivers.
 
 
+config FSL_XGMAC_MDIO
+	tristate "Freescale XGMAC MDIO"
+	depends on FSL_SOC
+	select PHYLIB
+	---help---
+	  This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+
 config UCC_GETH
 config UCC_GETH
 	tristate "Freescale QE Gigabit Ethernet"
 	tristate "Freescale QE Gigabit Ethernet"
 	depends on QUICC_ENGINE
 	depends on QUICC_ENGINE

+ 1 - 0
drivers/net/ethernet/freescale/Makefile

@@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
 endif
 endif
 obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
 obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
 obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
 gianfar_driver-objs := gianfar.o \
 gianfar_driver-objs := gianfar.o \

+ 292 - 257
drivers/net/ethernet/freescale/fsl_pq_mdio.c

@@ -19,54 +19,90 @@
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/errno.h>
-#include <linux/unistd.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
-#include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/crc32.h>
 #include <linux/mii.h>
 #include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_address.h>
 #include <linux/of_mdio.h>
 #include <linux/of_mdio.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
 
 
 #include <asm/io.h>
 #include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/ucc.h>
+#include <asm/ucc.h>	/* for ucc_set_qe_mux_mii_mng() */
 
 
 #include "gianfar.h"
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
+
+#define MIIMIND_BUSY		0x00000001
+#define MIIMIND_NOTVALID	0x00000004
+#define MIIMCFG_INIT_VALUE	0x00000007
+#define MIIMCFG_RESET		0x80000000
+
+#define MII_READ_COMMAND	0x00000001
+
+struct fsl_pq_mii {
+	u32 miimcfg;	/* MII management configuration reg */
+	u32 miimcom;	/* MII management command reg */
+	u32 miimadd;	/* MII management address reg */
+	u32 miimcon;	/* MII management control reg */
+	u32 miimstat;	/* MII management status reg */
+	u32 miimind;	/* MII management indication reg */
+};
+
+struct fsl_pq_mdio {
+	u8 res1[16];
+	u32 ieventm;	/* MDIO Interrupt event register (for etsec2)*/
+	u32 imaskm;	/* MDIO Interrupt mask register (for etsec2)*/
+	u8 res2[4];
+	u32 emapm;	/* MDIO Event mapping register (for etsec2)*/
+	u8 res3[1280];
+	struct fsl_pq_mii mii;
+	u8 res4[28];
+	u32 utbipar;	/* TBI phy address reg (only on UCC) */
+	u8 res5[2728];
+} __packed;
 
 
 /* Number of microseconds to wait for an MII register to respond */
 /* Number of microseconds to wait for an MII register to respond */
 #define MII_TIMEOUT	1000
 #define MII_TIMEOUT	1000
 
 
 struct fsl_pq_mdio_priv {
 struct fsl_pq_mdio_priv {
 	void __iomem *map;
 	void __iomem *map;
-	struct fsl_pq_mdio __iomem *regs;
+	struct fsl_pq_mii __iomem *regs;
+	int irqs[PHY_MAX_ADDR];
+};
+
+/*
+ * Per-device-type data.  Each type of device tree node that we support gets
+ * one of these.
+ *
+ * @mii_offset: the offset of the MII registers within the memory map of the
+ * node.  Some nodes define only the MII registers, and some define the whole
+ * MAC (which includes the MII registers).
+ *
+ * @get_tbipa: determines the address of the TBIPA register
+ *
+ * @ucc_configure: a special function for extra QE configuration
+ */
+struct fsl_pq_mdio_data {
+	unsigned int mii_offset;	/* offset of the MII registers */
+	uint32_t __iomem * (*get_tbipa)(void __iomem *p);
+	void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
 };
 };
 
 
 /*
 /*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus attached to the local interface, which may be different from the
- * generic mdio bus (tied to a single interface), waiting until the write is
- * done before returning. This is helpful in programming interfaces like
- * the TBI which control interfaces like onchip SERDES and are always tied to
- * the local mdio pins, which may not be the same as system mdio bus, used for
+ * Write value to the PHY at mii_id at register regnum, on the bus attached
+ * to the local interface, which may be different from the generic mdio bus
+ * (tied to a single interface), waiting until the write is done before
+ * returning. This is helpful in programming interfaces like the TBI which
+ * control interfaces like onchip SERDES and are always tied to the local
+ * mdio pins, which may not be the same as system mdio bus, used for
  * controlling the external PHYs, for example.
  * controlling the external PHYs, for example.
  */
  */
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-		int regnum, u16 value)
+static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+		u16 value)
 {
 {
+	struct fsl_pq_mdio_priv *priv = bus->priv;
+	struct fsl_pq_mii __iomem *regs = priv->regs;
 	u32 status;
 	u32 status;
 
 
 	/* Set the PHY address and the register address we want to write */
 	/* Set the PHY address and the register address we want to write */
@@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
 }
 }
 
 
 /*
 /*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.  All PHY operation
- * done on the bus attached to the local interface,
- * which may be different from the generic mdio bus
- * This is helpful in programming interfaces like
- * the TBI which, in turn, control interfaces like onchip SERDES
- * and are always tied to the local mdio pins, which may not be the
+ * Read the bus for PHY at addr mii_id, register regnum, and return the value.
+ * Clears miimcom first.
+ *
+ * All PHY operation done on the bus attached to the local interface, which
+ * may be different from the generic mdio bus.  This is helpful in programming
+ * interfaces like the TBI which, in turn, control interfaces like on-chip
+ * SERDES and are always tied to the local mdio pins, which may not be the
  * same as system mdio bus, used for controlling the external PHYs, for eg.
  * same as system mdio bus, used for controlling the external PHYs, for eg.
  */
  */
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
-		int mii_id, int regnum)
+static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
 {
-	u16 value;
+	struct fsl_pq_mdio_priv *priv = bus->priv;
+	struct fsl_pq_mii __iomem *regs = priv->regs;
 	u32 status;
 	u32 status;
+	u16 value;
 
 
 	/* Set the PHY address and the register address we want to read */
 	/* Set the PHY address and the register address we want to read */
 	out_be32(&regs->miimadd, (mii_id << 8) | regnum);
 	out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
 	/* Grab the value of the register from miimstat */
 	/* Grab the value of the register from miimstat */
 	value = in_be32(&regs->miimstat);
 	value = in_be32(&regs->miimstat);
 
 
+	dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
 	return value;
 	return value;
 }
 }
 
 
-static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
-{
-	struct fsl_pq_mdio_priv *priv = bus->priv;
-
-	return priv->regs;
-}
-
-/*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus, waiting until the write is done before returning.
- */
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
-{
-	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-	/* Write to the local MII regs */
-	return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
-}
-
-/*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.
- */
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
-	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-	/* Read the local MII regs */
-	return fsl_pq_local_mdio_read(regs, mii_id, regnum);
-}
-
 /* Reset the MIIM registers, and wait for the bus to free */
 /* Reset the MIIM registers, and wait for the bus to free */
 static int fsl_pq_mdio_reset(struct mii_bus *bus)
 static int fsl_pq_mdio_reset(struct mii_bus *bus)
 {
 {
-	struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+	struct fsl_pq_mdio_priv *priv = bus->priv;
+	struct fsl_pq_mii __iomem *regs = priv->regs;
 	u32 status;
 	u32 status;
 
 
 	mutex_lock(&bus->mdio_lock);
 	mutex_lock(&bus->mdio_lock);
@@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
 	mutex_unlock(&bus->mdio_lock);
 	mutex_unlock(&bus->mdio_lock);
 
 
 	if (!status) {
 	if (!status) {
-		printk(KERN_ERR "%s: The MII Bus is stuck!\n",
-				bus->name);
+		dev_err(&bus->dev, "timeout waiting for MII bus\n");
 		return -EBUSY;
 		return -EBUSY;
 	}
 	}
 
 
 	return 0;
 	return 0;
 }
 }
 
 
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+/*
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
 {
 {
-	const u32 *addr;
-	u64 taddr = OF_BAD_ADDR;
-
-	addr = of_get_address(np, 0, NULL, NULL);
-	if (addr)
-		taddr = of_translate_address(np, addr);
+	struct gfar __iomem *enet_regs = p;
 
 
-	snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
-		(unsigned long long)taddr);
+	return &enet_regs->tbipa;
 }
 }
-EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 
 
+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+	return p;
+}
+#endif
 
 
-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+/*
+ * Return the TBIPAR address for a QE MDIO node
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
 {
 {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
-	struct gfar __iomem *enet_regs;
+	struct fsl_pq_mdio __iomem *mdio = p;
 
 
-	/*
-	 * This is mildly evil, but so is our hardware for doing this.
-	 * Also, we have to cast back to struct gfar because of
-	 * definition weirdness done in gianfar.h.
-	 */
-	if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-		of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-		of_device_is_compatible(np, "gianfar")) {
-		enet_regs = (struct gfar __iomem *)regs;
-		return &enet_regs->tbipa;
-	} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-			of_device_is_compatible(np, "fsl,etsec2-tbi")) {
-		return of_iomap(np, 1);
-	}
-#endif
-	return NULL;
+	return &mdio->utbipar;
 }
 }
 
 
-
-static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them.  Therefore, we need to scan all UCC nodes looking for
+ * the one that encompases the given MDIO node.  We do this by comparing
+ * physical addresses.  The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
 {
 {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+	static bool found_mii_master;
 	struct device_node *np = NULL;
 	struct device_node *np = NULL;
-	int err = 0;
 
 
-	for_each_compatible_node(np, NULL, "ucc_geth") {
-		struct resource tempres;
+	if (found_mii_master)
+		return;
 
 
-		err = of_address_to_resource(np, 0, &tempres);
-		if (err)
+	for_each_compatible_node(np, NULL, "ucc_geth") {
+		struct resource res;
+		const uint32_t *iprop;
+		uint32_t id;
+		int ret;
+
+		ret = of_address_to_resource(np, 0, &res);
+		if (ret < 0) {
+			pr_debug("fsl-pq-mdio: no address range in node %s\n",
+				 np->full_name);
 			continue;
 			continue;
+		}
 
 
 		/* if our mdio regs fall within this UCC regs range */
 		/* if our mdio regs fall within this UCC regs range */
-		if ((start >= tempres.start) && (end <= tempres.end)) {
-			/* Find the id of the UCC */
-			const u32 *id;
-
-			id = of_get_property(np, "cell-index", NULL);
-			if (!id) {
-				id = of_get_property(np, "device-id", NULL);
-				if (!id)
-					continue;
+		if ((start < res.start) || (end > res.end))
+			continue;
+
+		iprop = of_get_property(np, "cell-index", NULL);
+		if (!iprop) {
+			iprop = of_get_property(np, "device-id", NULL);
+			if (!iprop) {
+				pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
+					 np->full_name);
+				continue;
 			}
 			}
+		}
 
 
-			*ucc_id = *id;
+		id = be32_to_cpup(iprop);
 
 
-			return 0;
+		/*
+		 * cell-index and device-id for QE nodes are
+		 * numbered from 1, not 0.
+		 */
+		if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
+			pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
+				 np->full_name);
+			continue;
 		}
 		}
+
+		pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
+		found_mii_master = true;
 	}
 	}
+}
 
 
-	if (err)
-		return err;
-	else
-		return -EINVAL;
-#else
-	return -ENODEV;
 #endif
 #endif
-}
 
 
-static int fsl_pq_mdio_probe(struct platform_device *ofdev)
+static struct of_device_id fsl_pq_mdio_match[] = {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+	{
+		.compatible = "fsl,gianfar-tbi",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			.get_tbipa = get_gfar_tbipa,
+		},
+	},
+	{
+		.compatible = "fsl,gianfar-mdio",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			.get_tbipa = get_gfar_tbipa,
+		},
+	},
+	{
+		.type = "mdio",
+		.compatible = "gianfar",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = offsetof(struct fsl_pq_mdio, mii),
+			.get_tbipa = get_gfar_tbipa,
+		},
+	},
+	{
+		.compatible = "fsl,etsec2-tbi",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = offsetof(struct fsl_pq_mdio, mii),
+			.get_tbipa = get_etsec_tbipa,
+		},
+	},
+	{
+		.compatible = "fsl,etsec2-mdio",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = offsetof(struct fsl_pq_mdio, mii),
+			.get_tbipa = get_etsec_tbipa,
+		},
+	},
+#endif
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+	{
+		.compatible = "fsl,ucc-mdio",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			.get_tbipa = get_ucc_tbipa,
+			.ucc_configure = ucc_configure,
+		},
+	},
+	{
+		/* Legacy UCC MDIO node */
+		.type = "mdio",
+		.compatible = "ucc_geth_phy",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			.get_tbipa = get_ucc_tbipa,
+			.ucc_configure = ucc_configure,
+		},
+	},
+#endif
+	/* No Kconfig option for Fman support yet */
+	{
+		.compatible = "fsl,fman-mdio",
+		.data = &(struct fsl_pq_mdio_data) {
+			.mii_offset = 0,
+			/* Fman TBI operations are handled elsewhere */
+		},
+	},
+
+	{},
+};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+
+static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
 {
-	struct device_node *np = ofdev->dev.of_node;
+	const struct of_device_id *id =
+		of_match_device(fsl_pq_mdio_match, &pdev->dev);
+	const struct fsl_pq_mdio_data *data = id->data;
+	struct device_node *np = pdev->dev.of_node;
+	struct resource res;
 	struct device_node *tbi;
 	struct device_node *tbi;
 	struct fsl_pq_mdio_priv *priv;
 	struct fsl_pq_mdio_priv *priv;
-	struct fsl_pq_mdio __iomem *regs = NULL;
-	void __iomem *map;
-	u32 __iomem *tbipa;
 	struct mii_bus *new_bus;
 	struct mii_bus *new_bus;
-	int tbiaddr = -1;
-	const u32 *addrp;
-	u64 addr = 0, size = 0;
 	int err;
 	int err;
 
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
+	dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
 
-	new_bus = mdiobus_alloc();
-	if (!new_bus) {
-		err = -ENOMEM;
-		goto err_free_priv;
-	}
+	new_bus = mdiobus_alloc_size(sizeof(*priv));
+	if (!new_bus)
+		return -ENOMEM;
 
 
+	priv = new_bus->priv;
 	new_bus->name = "Freescale PowerQUICC MII Bus",
 	new_bus->name = "Freescale PowerQUICC MII Bus",
-	new_bus->read = &fsl_pq_mdio_read,
-	new_bus->write = &fsl_pq_mdio_write,
-	new_bus->reset = &fsl_pq_mdio_reset,
-	new_bus->priv = priv;
-	fsl_pq_mdio_bus_name(new_bus->id, np);
-
-	addrp = of_get_address(np, 0, &size, NULL);
-	if (!addrp) {
-		err = -EINVAL;
-		goto err_free_bus;
+	new_bus->read = &fsl_pq_mdio_read;
+	new_bus->write = &fsl_pq_mdio_write;
+	new_bus->reset = &fsl_pq_mdio_reset;
+	new_bus->irq = priv->irqs;
+
+	err = of_address_to_resource(np, 0, &res);
+	if (err < 0) {
+		dev_err(&pdev->dev, "could not obtain address information\n");
+		goto error;
 	}
 	}
 
 
-	/* Set the PHY base address */
-	addr = of_translate_address(np, addrp);
-	if (addr == OF_BAD_ADDR) {
-		err = -EINVAL;
-		goto err_free_bus;
-	}
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+		(unsigned long long)res.start);
 
 
-	map = ioremap(addr, size);
-	if (!map) {
+	priv->map = of_iomap(np, 0);
+	if (!priv->map) {
 		err = -ENOMEM;
 		err = -ENOMEM;
-		goto err_free_bus;
+		goto error;
 	}
 	}
-	priv->map = map;
-
-	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-			of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-			of_device_is_compatible(np, "fsl,ucc-mdio") ||
-			of_device_is_compatible(np, "ucc_geth_phy"))
-		map -= offsetof(struct fsl_pq_mdio, miimcfg);
-	regs = map;
-	priv->regs = regs;
-
-	new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
 
 
-	if (NULL == new_bus->irq) {
-		err = -ENOMEM;
-		goto err_unmap_regs;
+	/*
+	 * Some device tree nodes represent only the MII registers, and
+	 * others represent the MAC and MII registers.  The 'mii_offset' field
+	 * contains the offset of the MII registers inside the mapped register
+	 * space.
+	 */
+	if (data->mii_offset > resource_size(&res)) {
+		dev_err(&pdev->dev, "invalid register map\n");
+		err = -EINVAL;
+		goto error;
 	}
+	priv->regs = priv->map + data->mii_offset;
 
-	new_bus->parent = &ofdev->dev;
-	dev_set_drvdata(&ofdev->dev, new_bus);
-
-	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-			of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-			of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-			of_device_is_compatible(np, "fsl,etsec2-tbi") ||
-			of_device_is_compatible(np, "gianfar")) {
-		tbipa = get_gfar_tbipa(regs, np);
-		if (!tbipa) {
-			err = -EINVAL;
-			goto err_free_irqs;
-		}
-	} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
-			of_device_is_compatible(np, "ucc_geth_phy")) {
-		u32 id;
-		static u32 mii_mng_master;
-
-		tbipa = &regs->utbipar;
-
-		if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
-			goto err_free_irqs;
+	new_bus->parent = &pdev->dev;
+	dev_set_drvdata(&pdev->dev, new_bus);
 
-		if (!mii_mng_master) {
-			mii_mng_master = id;
-			ucc_set_qe_mux_mii_mng(id - 1);
+	if (data->get_tbipa) {
+		for_each_child_of_node(np, tbi) {
+			if (strcmp(tbi->type, "tbi-phy") == 0) {
+				dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
+					strrchr(tbi->full_name, '/') + 1);
+				break;
+			}
 		}
-	} else {
-		err = -ENODEV;
-		goto err_free_irqs;
-	}
 
-	for_each_child_of_node(np, tbi) {
-		if (!strncmp(tbi->type, "tbi-phy", 8))
-			break;
-	}
+		if (tbi) {
+			const u32 *prop = of_get_property(tbi, "reg", NULL);
+			uint32_t __iomem *tbipa;
 
-	if (tbi) {
-		const u32 *prop = of_get_property(tbi, "reg", NULL);
+			if (!prop) {
+				dev_err(&pdev->dev,
+					"missing 'reg' property in node %s\n",
+					tbi->full_name);
+				err = -EBUSY;
+				goto error;
+			}
 
-		if (prop)
-			tbiaddr = *prop;
+			tbipa = data->get_tbipa(priv->map);
 
-		if (tbiaddr == -1) {
-			err = -EBUSY;
-			goto err_free_irqs;
-		} else {
-			out_be32(tbipa, tbiaddr);
+			out_be32(tbipa, be32_to_cpup(prop));
 		}
 	}
 
+	if (data->ucc_configure)
+		data->ucc_configure(res.start, res.end);
+
 	err = of_mdiobus_register(new_bus, np);
 	if (err) {
-		printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
-				new_bus->name);
-		goto err_free_irqs;
+		dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
+			new_bus->name);
+		goto error;
 	}
 
 	return 0;
 
-err_free_irqs:
-	kfree(new_bus->irq);
-err_unmap_regs:
-	iounmap(priv->map);
-err_free_bus:
+error:
+	if (priv->map)
+		iounmap(priv->map);
+
 	kfree(new_bus);
-err_free_priv:
-	kfree(priv);
+
 	return err;
 }
 
 
-static int fsl_pq_mdio_remove(struct platform_device *ofdev)
+static int fsl_pq_mdio_remove(struct platform_device *pdev)
 {
-	struct device *device = &ofdev->dev;
+	struct device *device = &pdev->dev;
 	struct mii_bus *bus = dev_get_drvdata(device);
 	struct fsl_pq_mdio_priv *priv = bus->priv;
 
@@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev)
 	dev_set_drvdata(device, NULL);
 
 	iounmap(priv->map);
-	bus->priv = NULL;
 	mdiobus_free(bus);
-	kfree(priv);
 
 	return 0;
 }
 
-static struct of_device_id fsl_pq_mdio_match[] = {
-	{
-		.type = "mdio",
-		.compatible = "ucc_geth_phy",
-	},
-	{
-		.type = "mdio",
-		.compatible = "gianfar",
-	},
-	{
-		.compatible = "fsl,ucc-mdio",
-	},
-	{
-		.compatible = "fsl,gianfar-tbi",
-	},
-	{
-		.compatible = "fsl,gianfar-mdio",
-	},
-	{
-		.compatible = "fsl,etsec2-tbi",
-	},
-	{
-		.compatible = "fsl,etsec2-mdio",
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
-
 static struct platform_driver fsl_pq_mdio_driver = {
 	.driver = {
 		.name = "fsl-pq_mdio",

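The probe rewrite above replaces the old chain of of_device_is_compatible() checks with per-compatible configuration hung off the match table's .data pointer. A minimal sketch of the same pattern with hypothetical names (my illustration, not part of this patch):

	struct foo_mdio_data {
		unsigned int mii_offset;	/* offset of the MII regs in the map */
	};

	static const struct of_device_id foo_mdio_match[] = {
		{
			.compatible = "vendor,foo-mdio",
			.data = &(struct foo_mdio_data){ .mii_offset = 0x520 },
		},
		{},
	};

	static int foo_mdio_probe(struct platform_device *pdev)
	{
		const struct of_device_id *id =
			of_match_device(foo_mdio_match, &pdev->dev);
		const struct foo_mdio_data *data;

		if (!id)
			return -EINVAL;
		data = id->data;
		/* data->mii_offset now selects the variant-specific layout */
		return 0;
	}

One table lookup at probe time replaces repeated string comparisons, and supporting a new controller variant becomes a one-entry change.
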
+ 0 - 52
drivers/net/ethernet/freescale/fsl_pq_mdio.h

@@ -1,52 +0,0 @@
-/*
- * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
- * Driver for the MDIO bus controller on Freescale PowerQUICC processors
- *
- * Author: Andy Fleming
- * Modifier: Sandeep Gopalpet
- *
- * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-#ifndef __FSL_PQ_MDIO_H
-#define __FSL_PQ_MDIO_H
-
-#define MIIMIND_BUSY            0x00000001
-#define MIIMIND_NOTVALID        0x00000004
-#define MIIMCFG_INIT_VALUE	0x00000007
-#define MIIMCFG_RESET           0x80000000
-
-#define MII_READ_COMMAND       0x00000001
-
-struct fsl_pq_mdio {
-	u8 res1[16];
-	u32 ieventm;	/* MDIO Interrupt event register (for etsec2)*/
-	u32 imaskm;	/* MDIO Interrupt mask register (for etsec2)*/
-	u8 res2[4];
-	u32 emapm;	/* MDIO Event mapping register (for etsec2)*/
-	u8 res3[1280];
-	u32 miimcfg;		/* MII management configuration reg */
-	u32 miimcom;		/* MII management command reg */
-	u32 miimadd;		/* MII management address reg */
-	u32 miimcon;		/* MII management control reg */
-	u32 miimstat;		/* MII management status reg */
-	u32 miimind;		/* MII management indication reg */
-	u8 reserved[28];	/* Space holder */
-	u32 utbipar;		/* TBI phy address reg (only on UCC) */
-	u8 res4[2728];
-} __packed;
-
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-			  int regnum, u16 value);
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
-int __init fsl_pq_mdio_init(void);
-void fsl_pq_mdio_exit(void);
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
-#endif /* FSL_PQ_MDIO_H */

+ 1 - 2
drivers/net/ethernet/freescale/gianfar.c

@@ -100,7 +100,6 @@
 #include <linux/of_net.h>
 
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
 
 #define TX_TIMEOUT      (1*HZ)
 
@@ -1041,7 +1040,7 @@ static int gfar_probe(struct platform_device *ofdev)
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
 		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		dev->features |= NETIF_F_HW_VLAN_RX;
 	}
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {

+ 0 - 1
drivers/net/ethernet/freescale/ucc_geth.c

@@ -42,7 +42,6 @@
 #include <asm/machdep.h>
 
 #include "ucc_geth.h"
-#include "fsl_pq_mdio.h"
 
 #undef DEBUG
 

+ 274 - 0
drivers/net/ethernet/freescale/xgmac_mdio.c

@@ -0,0 +1,274 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ *          Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+
+/* Number of microseconds to wait for a register to respond */
+#define TIMEOUT	1000
+
+struct tgec_mdio_controller {
+	__be32	reserved[12];
+	__be32	mdio_stat;	/* MDIO configuration and status */
+	__be32	mdio_ctl;	/* MDIO control */
+	__be32	mdio_data;	/* MDIO data */
+	__be32	mdio_addr;	/* MDIO address */
+} __packed;
+
+#define MDIO_STAT_CLKDIV(x)	(((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY		(1 << 0)
+#define MDIO_STAT_RD_ER		(1 << 1)
+#define MDIO_CTL_DEV_ADDR(x) 	(x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x)	((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS	(1 << 10)
+#define MDIO_CTL_SCAN_EN	(1 << 11)
+#define MDIO_CTL_POST_INC	(1 << 14)
+#define MDIO_CTL_READ		(1 << 15)
+
+#define MDIO_DATA(x)		(x & 0xffff)
+#define MDIO_DATA_BSY		(1 << 31)
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+				 struct tgec_mdio_controller __iomem *regs)
+{
+	uint32_t status;
+
+	/* Wait till the bus is free */
+	status = spin_event_timeout(
+		!((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
+	if (!status) {
+		dev_err(dev, "timeout waiting for bus to be free\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Wait till the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+				 struct tgec_mdio_controller __iomem *regs)
+{
+	uint32_t status;
+
+	/* Wait till the MDIO write is complete */
+	status = spin_event_timeout(
+		!((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
+	if (!status) {
+		dev_err(dev, "timeout waiting for operation to complete\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Write value to the PHY for this device to the register at regnum, waiting
+ * until the write is done before it returns.  All PHY configuration has to be
+ * done through the TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+	struct tgec_mdio_controller __iomem *regs = bus->priv;
+	uint16_t dev_addr = regnum >> 16;
+	int ret;
+
+	/* Setup the MII Mgmt clock speed */
+	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Set the port and dev addr */
+	out_be32(&regs->mdio_ctl,
+		 MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+
+	/* Set the register address */
+	out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Write the value to the register */
+	out_be32(&regs->mdio_data, MDIO_DATA(value));
+
+	ret = xgmac_wait_until_done(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first.  All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+	struct tgec_mdio_controller __iomem *regs = bus->priv;
+	uint16_t dev_addr = regnum >> 16;
+	uint32_t mdio_ctl;
+	uint16_t value;
+	int ret;
+
+	/* Setup the MII Mgmt clock speed */
+	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Set the Port and Device Addrs */
+	mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+	out_be32(&regs->mdio_ctl, mdio_ctl);
+
+	/* Set the register address */
+	out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Initiate the read */
+	out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+
+	ret = xgmac_wait_until_done(&bus->dev, regs);
+	if (ret)
+		return ret;
+
+	/* Return all Fs if nothing was there */
+	if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+		dev_err(&bus->dev, "MDIO read error\n");
+		return 0xffff;
+	}
+
+	value = in_be32(&regs->mdio_data) & 0xffff;
+	dev_dbg(&bus->dev, "read %04x\n", value);
+
+	return value;
+}
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int xgmac_mdio_reset(struct mii_bus *bus)
+{
+	struct tgec_mdio_controller __iomem *regs = bus->priv;
+	int ret;
+
+	mutex_lock(&bus->mdio_lock);
+
+	/* Setup the MII Mgmt clock speed */
+	out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+	ret = xgmac_wait_until_free(&bus->dev, regs);
+
+	mutex_unlock(&bus->mdio_lock);
+
+	return ret;
+}
+
+static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct mii_bus *bus;
+	struct resource res;
+	int ret;
+
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret) {
+		dev_err(&pdev->dev, "could not obtain address\n");
+		return ret;
+	}
+
+	bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+	if (!bus)
+		return -ENOMEM;
+
+	bus->name = "Freescale XGMAC MDIO Bus";
+	bus->read = xgmac_mdio_read;
+	bus->write = xgmac_mdio_write;
+	bus->reset = xgmac_mdio_reset;
+	bus->irq = bus->priv;
+	bus->parent = &pdev->dev;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+	/* Set the PHY base address */
+	bus->priv = of_iomap(np, 0);
+	if (!bus->priv) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	ret = of_mdiobus_register(bus, np);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot register MDIO bus\n");
+		goto err_registration;
+	}
+
+	dev_set_drvdata(&pdev->dev, bus);
+
+	return 0;
+
+err_registration:
+	iounmap(bus->priv);
+
+err_ioremap:
+	mdiobus_free(bus);
+
+	return ret;
+}
+
+static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
+{
+	struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+
+	mdiobus_unregister(bus);
+	iounmap(bus->priv);
+	mdiobus_free(bus);
+
+	return 0;
+}
+
+static struct of_device_id xgmac_mdio_match[] = {
+	{
+		.compatible = "fsl,fman-xmdio",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+	.driver = {
+		.name = "fsl-fman_xmdio",
+		.of_match_table = xgmac_mdio_match,
+	},
+	.probe = xgmac_mdio_probe,
+	.remove = xgmac_mdio_remove,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");

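The two wait helpers above use spin_event_timeout(), a powerpc helper that busy-polls a condition with a microsecond budget. Open-coded, the same wait is roughly the following (a sketch; wait_not_busy is my name, not from this driver):

	static int wait_not_busy(struct device *dev,
				 struct tgec_mdio_controller __iomem *regs)
	{
		int timeout = TIMEOUT;	/* microseconds, as defined above */

		/* poll the BSY flag until it clears or the budget runs out */
		while ((in_be32(&regs->mdio_stat) & MDIO_STAT_BSY) && --timeout)
			udelay(1);

		if (!timeout) {
			dev_err(dev, "timeout waiting for bus to be free\n");
			return -ETIMEDOUT;
		}

		return 0;
	}
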
+ 2 - 2
drivers/net/ethernet/intel/e1000e/82571.c

@@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
  **/
 static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-	u16 data = er32(POEMB);
+	u32 data = er32(POEMB);
 
 	if (active)
 		data |= E1000_PHY_CTRL_D0A_LPLU;
@@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
  **/
 static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-	u16 data = er32(POEMB);
+	u32 data = er32(POEMB);
 
 	if (!active) {
 		data &= ~E1000_PHY_CTRL_NOND0A_LPLU;

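The 82571 change above is a truncation fix: POEMB is a 32-bit register, so staging its value in a u16 silently drops the upper half before it is written back. A worked illustration with a hypothetical register value:

	u32 reg  = er32(POEMB);	/* suppose the device returns 0x00010004 */
	u16 data = reg;		/* data == 0x0004: bit 16 is already gone */

	data |= E1000_PHY_CTRL_D0A_LPLU;
	/* writing data back would clear every POEMB bit above bit 15 */

With a u32 the read-modify-write preserves the bits the function never meant to touch.
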
+ 1 - 0
drivers/net/ethernet/intel/e1000e/e1000.h

@@ -310,6 +310,7 @@ struct e1000_adapter {
 	 */
 	struct e1000_ring *tx_ring /* One per active queue */
 						____cacheline_aligned_in_smp;
+	u32 tx_fifo_limit;
 
 	struct napi_struct napi;
 

+ 2 - 1
drivers/net/ethernet/intel/e1000e/ethtool.c

@@ -1942,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev,
 		return -EINVAL;
 
 	if (ec->rx_coalesce_usecs == 4) {
-		adapter->itr = adapter->itr_setting = 4;
+		adapter->itr_setting = 4;
+		adapter->itr = adapter->itr_setting;
 	} else if (ec->rx_coalesce_usecs <= 3) {
 		adapter->itr = 20000;
 		adapter->itr_setting = ec->rx_coalesce_usecs;

+ 36 - 31
drivers/net/ethernet/intel/e1000e/netdev.c

@@ -56,7 +56,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
+#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -3446,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
 			/*
 			 * if short on Rx space, Rx wins and must trump Tx
-			 * adjustment or use Early Receive if available
+			 * adjustment
 			 */
 			if (pba < min_rx_space)
 				pba = min_rx_space;
@@ -3516,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		break;
 	}
 
+	/*
+	 * Alignment of Tx data is on an arbitrary byte boundary with the
+	 * maximum size per Tx descriptor limited only to the transmit
+	 * allocation of the packet buffer minus 96 bytes with an upper
+	 * limit of 24KB due to receive synchronization limitations.
+	 */
+	adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+				       24 << 10);
+
 	/*
 	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
 	 * fit in receive buffer.
@@ -3746,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
 	e_dbg("icr is %08X\n", icr);
 	if (icr & E1000_ICR_RXSEQ) {
 		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+		/*
+		 * Force memory writes to complete before acknowledging the
+		 * interrupt is handled.
+		 */
 		wmb();
 	}
 
@@ -3787,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 		goto msi_test_failed;
 	}
 
+	/*
+	 * Force memory writes to complete before enabling and firing an
+	 * interrupt.
+	 */
 	wmb();
 
 	e1000_irq_enable(adapter);
@@ -3798,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 
 	e1000_irq_disable(adapter);
 
-	rmb();
+	rmb();			/* read flags after interrupt has been fired */
 
 	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
 		adapter->int_mode = E1000E_INT_MODE_LEGACY;
@@ -4661,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	struct e1000_buffer *buffer_info;
 	unsigned int i;
 	u32 cmd_length = 0;
-	u16 ipcse = 0, tucse, mss;
+	u16 ipcse = 0, mss;
 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
 
 	if (!skb_is_gso(skb))
@@ -4695,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
 	tucss = skb_transport_offset(skb);
 	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-	tucse = 0;
 
 	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
 	               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
@@ -4709,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
 	context_desc->upper_setup.tcp_fields.tucss = tucss;
 	context_desc->upper_setup.tcp_fields.tucso = tucso;
-	context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+	context_desc->upper_setup.tcp_fields.tucse = 0;
 	context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
 	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
 	context_desc->cmd_and_length = cpu_to_le32(cmd_length);
@@ -4785,12 +4801,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	return 1;
 }
 
-#define E1000_MAX_PER_TXD	8192
-#define E1000_MAX_TXD_PWR	12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
 			unsigned int first, unsigned int max_per_txd,
-			unsigned int nr_frags, unsigned int mss)
+			unsigned int nr_frags)
 {
 	struct e1000_adapter *adapter = tx_ring->adapter;
 	struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5036,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+	BUG_ON(size > tx_ring->count);
+
 	if (e1000_desc_unused(tx_ring) >= size)
 		return 0;
 	return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				    struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	unsigned int first;
-	unsigned int max_per_txd = E1000_MAX_PER_TXD;
-	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
 	unsigned int len = skb_headlen(skb);
 	unsigned int nr_frags;
@@ -5056,18 +5068,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	}
 
 	mss = skb_shinfo(skb)->gso_size;
-	/*
-	 * The controller does a simple calculation to
-	 * make sure there is enough room in the FIFO before
-	 * initiating the DMA for each buffer.  The calc is:
-	 * 4 = ceil(buffer len/mss).  To make sure we don't
-	 * overrun the FIFO, adjust the max buffer len if mss
-	 * drops.
-	 */
 	if (mss) {
 		u8 hdr_len;
-		max_per_txd = min(mss << 2, max_per_txd);
-		max_txd_pwr = fls(max_per_txd) - 1;
 
 		/*
 		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5099,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		count++;
 	count++;
 
-	count += TXD_USE_COUNT(len, max_txd_pwr);
+	count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++)
-		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-				       max_txd_pwr);
+		count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+				      adapter->tx_fifo_limit);
 
 	if (adapter->hw.mac.tx_pkt_filtering)
 		e1000_transfer_dhcp_info(adapter, skb);
@@ -5144,15 +5146,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
 	/* if count is 0 then mapping error has occurred */
-	count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+			     nr_frags);
 	if (count) {
 		skb_tx_timestamp(skb);
 
 		netdev_sent_queue(netdev, skb->len);
 		e1000_tx_queue(tx_ring, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
-		e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+		e1000_maybe_stop_tx(tx_ring,
+				    (MAX_SKB_FRAGS *
+				     DIV_ROUND_UP(PAGE_SIZE,
+						  adapter->tx_fifo_limit) + 2));
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
@@ -6327,8 +6332,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	adapter->hw.phy.autoneg_advertised = 0x2f;
 
 	/* ring size defaults */
-	adapter->rx_ring->count = 256;
-	adapter->tx_ring->count = 256;
+	adapter->rx_ring->count = E1000_DEFAULT_RXD;
+	adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
 	/*
 	 * Initial Wake on LAN setting - If APM wake is enabled in

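The DIV_ROUND_UP() conversion above sizes descriptor counts from the FIFO budget measured in e1000e_reset() instead of the old fixed power-of-two split. Worked numbers, assuming the 24KB cap set above (my arithmetic, not from the patch):

	unsigned int tx_fifo_limit = 24 << 10;	/* 24576 bytes */
	unsigned int head_len = 9000;		/* jumbo-frame example */

	/* new: DIV_ROUND_UP(9000, 24576) == 1 descriptor       */
	/* old: TXD_USE_COUNT(9000, 12) == (9000 >> 12) + 1 == 3 */

Fewer descriptors are reserved per large send, so the queue is stopped later while the MAX_SKB_FRAGS-based bound above stays conservative for fragmented skbs.
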
+ 3 - 1
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h

@@ -101,7 +101,9 @@ struct ixgbevf_ring {
 
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2048  2048
+#define IXGBEVF_RXBUFFER_3K    3072
+#define IXGBEVF_RXBUFFER_7K    7168
+#define IXGBEVF_RXBUFFER_15K   15360
 #define IXGBEVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 
 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256

+ 129 - 31
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

@@ -1057,15 +1057,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
-	if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-		srrctl |= IXGBEVF_RXBUFFER_2048 >>
-			IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-	else
-		srrctl |= rx_ring->rx_buf_len >>
-			IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
 	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
 }
 
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	int i;
+	u16 rx_buf_len;
+
+	/* notify the PF of our intent to use this size of frame */
+	ixgbevf_rlpml_set_vf(hw, max_frame);
+
+	/* PF will allow an extra 4 bytes past for vlan tagged frames */
+	max_frame += VLAN_HLEN;
+
+	/*
+	 * Make best use of allocation by using all but 1K of a
+	 * power of 2 allocation that will be used for skb->head.
+	 */
+	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
+	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
+		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	else if (max_frame <= IXGBEVF_RXBUFFER_3K)
+		rx_buf_len = IXGBEVF_RXBUFFER_3K;
+	else if (max_frame <= IXGBEVF_RXBUFFER_7K)
+		rx_buf_len = IXGBEVF_RXBUFFER_7K;
+	else if (max_frame <= IXGBEVF_RXBUFFER_15K)
+		rx_buf_len = IXGBEVF_RXBUFFER_15K;
+	else
+		rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+}
+
 /**
  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
  * @adapter: board private structure
@@ -1076,18 +1107,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
 	u64 rdba;
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	int i, j;
 	u32 rdlen;
-	int rx_buf_len;
 
 	/* PSRTYPE must be initialized in 82599 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
-	if (netdev->mtu <= ETH_DATA_LEN)
-		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	else
-		rx_buf_len = ALIGN(max_frame, 1024);
+
+	/* set_rx_buffer_len must be called before ring initialization */
+	ixgbevf_set_rx_buffer_len(adapter);
 
 	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1103,7 +1130,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
 		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
 		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
-		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
 
 		ixgbevf_configure_srrctl(adapter, j);
 	}
@@ -1315,7 +1341,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 	int i, j = 0;
 	int num_rx_rings = adapter->num_rx_queues;
 	u32 txdctl, rxdctl;
-	u32 msg[2];
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		j = adapter->tx_ring[i].reg_idx;
@@ -1356,10 +1381,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
 	}
 
-	msg[0] = IXGBE_VF_SET_LPE;
-	msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-	hw->mbx.ops.write_posted(hw, msg, 2);
-
 	spin_unlock(&adapter->mbx_lock);
 
 	clear_bit(__IXGBEVF_DOWN, &adapter->state);
@@ -1866,6 +1887,22 @@ err_set_interrupt:
 	return err;
 }
 
+/**
+ * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+
+	ixgbevf_free_q_vectors(adapter);
+	ixgbevf_reset_interrupt_capability(adapter);
+}
+
 /**
  * ixgbevf_sw_init - Initialize general software structures
  * (struct ixgbevf_adapter)
@@ -2860,10 +2897,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_hw *hw = &adapter->hw;
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
-	u32 msg[2];
 
 	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
 		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
@@ -2877,35 +2912,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
 
-	if (!netif_running(netdev)) {
-		msg[0] = IXGBE_VF_SET_LPE;
-		msg[1] = max_frame;
-		hw->mbx.ops.write_posted(hw, msg, 2);
-	}
-
 	if (netif_running(netdev))
 		ixgbevf_reinit_locked(adapter);
 
 	return 0;
 }
 
-static void ixgbevf_shutdown(struct pci_dev *pdev)
+static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
 
 	netif_device_detach(netdev);
 
 	if (netif_running(netdev)) {
+		rtnl_lock();
 		ixgbevf_down(adapter);
 		ixgbevf_free_irq(adapter);
 		ixgbevf_free_all_tx_resources(adapter);
 		ixgbevf_free_all_rx_resources(adapter);
+		rtnl_unlock();
 	}
 
-	pci_save_state(pdev);
+	ixgbevf_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
 
+#endif
 	pci_disable_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbevf_resume(struct pci_dev *pdev)
+{
+	struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	u32 err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/*
+	 * pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	rtnl_lock();
+	err = ixgbevf_init_interrupt_scheme(adapter);
+	rtnl_unlock();
+	if (err) {
+		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
+		return err;
+	}
+
+	ixgbevf_reset(adapter);
+
+	if (netif_running(netdev)) {
+		err = ixgbevf_open(netdev);
+		if (err)
+			return err;
+	}
+
+	netif_device_attach(netdev);
+
+	return err;
+}
+
+#endif /* CONFIG_PM */
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+	ixgbevf_suspend(pdev, PMSG_SUSPEND);
 }
 
 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
@@ -2946,7 +3037,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	return stats;
 }
 
-static const struct net_device_ops ixgbe_netdev_ops = {
+static const struct net_device_ops ixgbevf_netdev_ops = {
 	.ndo_open		= ixgbevf_open,
 	.ndo_stop		= ixgbevf_close,
 	.ndo_start_xmit		= ixgbevf_xmit_frame,
@@ -2962,7 +3053,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
 {
-	dev->netdev_ops = &ixgbe_netdev_ops;
+	dev->netdev_ops = &ixgbevf_netdev_ops;
 	ixgbevf_set_ethtool_ops(dev);
 	dev->watchdog_timeo = 5 * HZ;
 }
@@ -3131,6 +3222,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 	return 0;
 
 err_register:
+	ixgbevf_clear_interrupt_scheme(adapter);
 err_sw_init:
 	ixgbevf_reset_interrupt_capability(adapter);
 	iounmap(hw->hw_addr);
@@ -3168,6 +3260,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);
 
+	ixgbevf_clear_interrupt_scheme(adapter);
 	ixgbevf_reset_interrupt_capability(adapter);
 
 	iounmap(adapter->hw.hw_addr);
@@ -3267,6 +3360,11 @@ static struct pci_driver ixgbevf_driver = {
 	.id_table = ixgbevf_pci_tbl,
 	.probe    = ixgbevf_probe,
 	.remove   = __devexit_p(ixgbevf_remove),
+#ifdef CONFIG_PM
+	/* Power Management Hooks */
+	.suspend  = ixgbevf_suspend,
+	.resume   = ixgbevf_resume,
+#endif
 	.shutdown = ixgbevf_shutdown,
 	.err_handler = &ixgbevf_err_handler
 };

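The receive-buffer tiers introduced above deliberately sit 1KB below a power of two so that the buffer plus skb overhead still fits one power-of-two allocation, as the comment in ixgbevf_set_rx_buffer_len() notes. A worked example using the constants from ixgbevf.h (my arithmetic, not from the patch):

	/* IXGBEVF_RXBUFFER_3K  == 4096  - 1024
	 * IXGBEVF_RXBUFFER_7K  == 8192  - 1024
	 * IXGBEVF_RXBUFFER_15K == 16384 - 1024
	 *
	 * e.g. a 9000-byte MTU gives
	 *   max_frame = 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4)
	 *             = 9022, which selects the IXGBEVF_RXBUFFER_15K tier
	 */
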
+ 14 - 0
drivers/net/ethernet/intel/ixgbevf/vf.c

@@ -419,6 +419,20 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 	return 0;
 }
 
+/**
+ *  ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+	u32 msgbuf[2];
+
+	msgbuf[0] = IXGBE_VF_SET_LPE;
+	msgbuf[1] = max_size;
+	ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 	.init_hw             = ixgbevf_init_hw_vf,
 	.reset_hw            = ixgbevf_reset_hw_vf,

+ 1 - 0
drivers/net/ethernet/intel/ixgbevf/vf.h

@@ -170,5 +170,6 @@ struct ixgbevf_info {
 	const struct ixgbe_mac_operations *mac_ops;
 };
 
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
 #endif /* __IXGBE_VF_H__ */
 

+ 13 - 4
drivers/net/ethernet/nvidia/forcedeth.c

@@ -3409,7 +3409,7 @@ set_speed:
 
 	pause_flags = 0;
 	/* setup pause frame */
-	if (np->duplex != 0) {
+	if (netif_running(dev) && (np->duplex != 0)) {
 		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
 			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
 			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
 	regs->version = FORCEDETH_REGS_VER;
 	spin_lock_irq(&np->lock);
-	for (i = 0; i <= np->register_size/sizeof(u32); i++)
+	for (i = 0; i < np->register_size/sizeof(u32); i++)
 		rbuf[i] = readl(base + i*sizeof(u32));
 	spin_unlock_irq(&np->lock);
 }
@@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev)
 
 	netif_stop_queue(dev);
 	spin_lock_irq(&np->lock);
+	nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
 	nv_stop_rxtx(dev);
 	nv_txrx_reset(dev);
 
@@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		goto out_error;
 	}
 
+	netif_carrier_off(dev);
+
+	/* Some NICs freeze when TX pause is enabled while NIC is
+	 * down, and this stays across warm reboots. The sequence
+	 * below should be enough to recover from that state.
+	 */
+	nv_update_pause(dev, 0);
+	nv_start_tx(dev);
+	nv_stop_tx(dev);
+
 	if (id->driver_data & DEV_HAS_VLAN)
 		nv_vlan_mode(dev, dev->features);
 
-	netif_carrier_off(dev);
-
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 

+ 2 - 0
drivers/net/ethernet/realtek/r8169.c

@@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
+	{ PCI_VENDOR_ID_DLINK,			0x4300,
+		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4302), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },

+ 133 - 102
drivers/net/ethernet/sfc/efx.c

@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)		\
 	do {						\
-		if ((efx->state == STATE_RUNNING) ||	\
+		if ((efx->state == STATE_READY) ||	\
 		    (efx->state == STATE_DISABLED))	\
 			ASSERT_RTNL();			\
 	} while (0)
 
+static int efx_check_disabled(struct efx_nic *efx)
+{
+	if (efx->state == STATE_DISABLED) {
+		netif_err(efx, drv, efx->net_dev,
+			  "device is disabled due to earlier errors\n");
+		return -EIO;
+	}
+	return 0;
+}
+
 /**************************************************************************
  *
  * Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
 	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
 					 sizeof(struct efx_rx_page_state));
 
+	/* We must keep at least one descriptor in a TX ring empty.
+	 * We could avoid this when the queue size does not exactly
+	 * match the hardware ring size, but it's not that important.
+	 * Therefore we stop the queue when one more skb might fill
+	 * the ring completely.  We wake it when half way back to
+	 * empty.
+	 */
+	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
 	/* Initialise the channels */
 	efx_for_each_channel(channel, efx) {
 		efx_for_each_channel_tx_queue(tx_queue, channel)
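The stop/wake thresholds set here give the TX queues the usual ring hysteresis. A sketch of how a transmit path would consult them (efx_tx_queue_fill_level() is a hypothetical helper name, not from this patch):

	/* stop once one more skb might overfill the ring ... */
	if (efx_tx_queue_fill_level(tx_queue) > efx->txq_stop_thresh)
		netif_tx_stop_queue(tx_queue->core_txq);

	/* ... and, from the completion path, wake at half way back to empty */
	if (efx_tx_queue_fill_level(tx_queue) <= efx->txq_wake_thresh)
		netif_tx_wake_queue(tx_queue->core_txq);
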
@@ -730,7 +750,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
 	u32 old_rxq_entries, old_txq_entries;
 	unsigned i, next_buffer_table = 0;
-	int rc = 0;
+	int rc;
+
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
 
 	/* Not all channels should be reallocated. We must avoid
 	 * reallocating their buffer table entries.
@@ -1365,6 +1389,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
 	struct efx_channel *channel;
 
+	BUG_ON(efx->state == STATE_DISABLED);
+
 	if (efx->legacy_irq)
 		efx->legacy_irq_enabled = true;
 	efx_nic_enable_interrupts(efx);
@@ -1382,6 +1408,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
 	struct efx_channel *channel;
 
+	if (efx->state == STATE_DISABLED)
+		return;
+
 	efx_mcdi_mode_poll(efx);
 
 	efx_nic_disable_interrupts(efx);
@@ -1533,22 +1562,21 @@ static int efx_probe_all(struct efx_nic *efx)
 	return rc;
 }
 
-/* Called after previous invocation(s) of efx_stop_all, restarts the port,
- * kernel transmit queues and NAPI processing, and ensures that the port is
- * scheduled to be reconfigured. This function is safe to call multiple
- * times when the NIC is in any state.
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured.  Interrupts must already be enabled.  This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
  */
 static void efx_start_all(struct efx_nic *efx)
 {
 	EFX_ASSERT_RESET_SERIALISED(efx);
+	BUG_ON(efx->state == STATE_DISABLED);
 
 	/* Check that it is appropriate to restart the interface. All
 	 * of these flags are safe to read under just the rtnl lock */
-	if (efx->port_enabled)
-		return;
-	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
-		return;
-	if (!netif_running(efx->net_dev))
+	if (efx->port_enabled || !netif_running(efx->net_dev))
 		return;
 
 	efx_start_port(efx);
@@ -1582,11 +1610,11 @@ static void efx_flush_all(struct efx_nic *efx)
 	cancel_work_sync(&efx->mac_work);
 }
 
-/* Quiesce hardware and software without bringing the link down.
- * Safe to call multiple times, when the nic and interface is in any
- * state. The caller is guaranteed to subsequently be in a position
- * to modify any hardware and software state they see fit without
- * taking locks. */
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down.  Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled.  Requires the RTNL lock.
+ */
 static void efx_stop_all(struct efx_nic *efx)
 {
 	EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,8 +1767,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct mii_ioctl_data *data = if_mii(ifr);
 
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
 	/* Convert phy_id from older PRTAD/DEVAD format */
 	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
 	    (data->phy_id & 0xfc00) == 0x0400)
@@ -1820,13 +1846,14 @@ static void efx_netpoll(struct net_device *net_dev)
 static int efx_net_open(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	EFX_ASSERT_RESET_SERIALISED(efx);
+	int rc;
 
 	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
 		  raw_smp_processor_id());
 
-	if (efx->state == STATE_DISABLED)
-		return -EIO;
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
 	if (efx->phy_mode & PHY_MODE_SPECIAL)
 		return -EBUSY;
 	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1879,8 @@ static int efx_net_stop(struct net_device *net_dev)
 	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
 		  raw_smp_processor_id());
 
-	if (efx->state != STATE_DISABLED) {
-		/* Stop the device and flush all the channels */
-		efx_stop_all(efx);
-	}
+	/* Stop the device and flush all the channels */
+	efx_stop_all(efx);
 
 	return 0;
 }
@@ -1915,9 +1940,11 @@ static void efx_watchdog(struct net_device *net_dev)
 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
 
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
 	if (new_mtu > EFX_MAX_MTU)
 		return -EINVAL;
 
@@ -1926,8 +1953,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
 	mutex_lock(&efx->mac_lock);
-	/* Reconfigure the MAC before enabling the dma queues so that
-	 * the RX buffers don't overflow */
 	net_dev->mtu = new_mtu;
 	efx->type->reconfigure_mac(efx);
 	mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1967,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
 	struct sockaddr *addr = data;
 	char *new_addr = addr->sa_data;
 
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
 	if (!is_valid_ether_addr(new_addr)) {
 		netif_err(efx, drv, efx->net_dev,
 			  "invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2102,27 @@ static int efx_register_netdev(struct efx_nic *efx)
 
 	rtnl_lock();
 
+	/* Enable resets to be scheduled and check whether any were
+	 * already requested.  If so, the NIC is probably hosed so we
+	 * abort.
+	 */
+	efx->state = STATE_READY;
+	smp_mb(); /* ensure we change state before checking reset_pending */
+	if (efx->reset_pending) {
+		netif_err(efx, probe, efx->net_dev,
+			  "aborting probe due to scheduled reset\n");
+		rc = -EIO;
+		goto fail_locked;
+	}
+
 	rc = dev_alloc_name(net_dev, net_dev->name);
 	if (rc < 0)
 		goto fail_locked;
 	efx_update_name(efx);
 
+	/* Always start with carrier off; PHY events will detect the link */
+	netif_carrier_off(net_dev);
+
 	rc = register_netdevice(net_dev);
 	if (rc)
 		goto fail_locked;
@@ -2094,9 +2133,6 @@ static int efx_register_netdev(struct efx_nic *efx)
 			efx_init_tx_queue_core_txq(tx_queue);
 	}
 
-	/* Always start with carrier off; PHY events will detect the link */
-	netif_carrier_off(net_dev);
-
 	rtnl_unlock();
 
 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2144,14 @@ static int efx_register_netdev(struct efx_nic *efx)
 
 	return 0;
 
+fail_registered:
+	rtnl_lock();
+	unregister_netdevice(net_dev);
 fail_locked:
+	efx->state = STATE_UNINIT;
 	rtnl_unlock();
 	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
 	return rc;
-
-fail_registered:
-	unregister_netdev(net_dev);
-	return rc;
 }
 
 static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2174,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 
 	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
 	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-	unregister_netdev(efx->net_dev);
+
+	rtnl_lock();
+	unregister_netdevice(efx->net_dev);
+	efx->state = STATE_UNINIT;
+	rtnl_unlock();
 }
 
 /**************************************************************************
@@ -2154,9 +2194,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	efx_stop_all(efx);
-	mutex_lock(&efx->mac_lock);
-
 	efx_stop_interrupts(efx, false);
+
+	mutex_lock(&efx->mac_lock);
 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
 		efx->phy_op->fini(efx);
 	efx->type->fini(efx);
@@ -2276,16 +2316,15 @@ static void efx_reset_work(struct work_struct *data)
 	if (!pending)
 		return;
 
-	/* If we're not RUNNING then don't reset. Leave the reset_pending
-	 * flags set so that efx_pci_probe_main will be retried */
-	if (efx->state != STATE_RUNNING) {
-		netif_info(efx, drv, efx->net_dev,
-			   "scheduled reset quenched. NIC not RUNNING\n");
-		return;
-	}
-
 	rtnl_lock();
-	(void)efx_reset(efx, fls(pending) - 1);
+
+	/* We checked the state in efx_schedule_reset() but it may
+	 * have changed by now.  Now that we have the RTNL lock,
+	 * it cannot change again.
+	 */
+	if (efx->state == STATE_READY)
+		(void)efx_reset(efx, fls(pending) - 1);
+
 	rtnl_unlock();
 }
 
@@ -2311,6 +2350,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 	}
 
 	set_bit(method, &efx->reset_pending);
+	smp_mb(); /* ensure we change reset_pending before checking state */
+
+	/* If we're not READY then just leave the flags set as the cue
+	 * to abort probing or reschedule the reset later.
+	 */
+	if (ACCESS_ONCE(efx->state) != STATE_READY)
+		return;
 
 	/* efx_process_channel() will no longer read events once a
 	 * reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2376,13 +2422,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
 /* This zeroes out and then fills in the invariants in a struct
  * efx_nic (including all sub-structures).
  */
-static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx,
 			   struct pci_dev *pci_dev, struct net_device *net_dev)
 {
 {
 	int i;
 	int i;
 
 
 	/* Initialise common structures */
 	/* Initialise common structures */
-	memset(efx, 0, sizeof(*efx));
 	spin_lock_init(&efx->biu_lock);
 	spin_lock_init(&efx->biu_lock);
 #ifdef CONFIG_SFC_MTD
 #ifdef CONFIG_SFC_MTD
 	INIT_LIST_HEAD(&efx->mtd_list);
 	INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2437,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
 	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
 	efx->pci_dev = pci_dev;
 	efx->msg_enable = debug;
-	efx->state = STATE_INIT;
+	efx->state = STATE_UNINIT;
 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 
 	efx->net_dev = net_dev;
@@ -2409,8 +2454,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
 			goto fail;
 	}
 
-	efx->type = type;
-
 	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
 	/* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2498,12 @@ static void efx_fini_struct(struct efx_nic *efx)
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
+	/* Flush reset_work. It can no longer be scheduled since we
+	 * are not READY.
+	 */
+	BUG_ON(efx->state == STATE_READY);
+	cancel_work_sync(&efx->reset_work);
+
 #ifdef CONFIG_RFS_ACCEL
 	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
 	efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2529,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 
 	/* Mark the NIC as fini, then stop the interface */
 	rtnl_lock();
-	efx->state = STATE_FINI;
 	dev_close(efx->net_dev);
-
-	/* Allow any queued efx_resets() to complete */
+	efx_stop_interrupts(efx, false);
 	rtnl_unlock();
 
-	efx_stop_interrupts(efx, false);
 	efx_sriov_fini(efx);
 	efx_unregister_netdev(efx);
 
 	efx_mtd_remove(efx);
 
-	/* Wait for any scheduled resets to complete. No more will be
-	 * scheduled from this point because efx_stop_all() has been
-	 * called, we are no longer registered with driverlink, and
-	 * the net_device's have been removed. */
-	cancel_work_sync(&efx->reset_work);
-
 	efx_pci_remove_main(efx);
 
 	efx_fini_io(efx);
@@ -2617,7 +2657,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 				   const struct pci_device_id *entry)
 {
-	const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
 	struct net_device *net_dev;
 	struct efx_nic *efx;
 	int rc;
@@ -2627,10 +2666,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
-	net_dev->features |= (type->offload_features | NETIF_F_SG |
+	efx = netdev_priv(net_dev);
+	efx->type = (const struct efx_nic_type *) entry->driver_data;
+	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
 			      NETIF_F_HIGHDMA | NETIF_F_TSO |
 			      NETIF_F_RXCSUM);
-	if (type->offload_features & NETIF_F_V6_CSUM)
+	if (efx->type->offload_features & NETIF_F_V6_CSUM)
 		net_dev->features |= NETIF_F_TSO6;
 	/* Mask for features that also apply to VLAN devices */
 	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2679,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 				   NETIF_F_RXCSUM);
 	/* All offloads can be toggled */
 	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
-	efx = netdev_priv(net_dev);
 	pci_set_drvdata(pci_dev, efx);
 	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
-	rc = efx_init_struct(efx, type, pci_dev, net_dev);
+	rc = efx_init_struct(efx, pci_dev, net_dev);
 	if (rc)
 		goto fail1;
 
@@ -2656,28 +2696,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 		goto fail2;
 
 	rc = efx_pci_probe_main(efx);
-
-	/* Serialise against efx_reset(). No more resets will be
-	 * scheduled since efx_stop_all() has been called, and we have
-	 * not and never have been registered.
-	 */
-	cancel_work_sync(&efx->reset_work);
-
 	if (rc)
 		goto fail3;
 
-	/* If there was a scheduled reset during probe, the NIC is
-	 * probably hosed anyway.
-	 */
-	if (efx->reset_pending) {
-		rc = -EIO;
-		goto fail4;
-	}
-
-	/* Switch to the running state before we expose the device to the OS,
-	 * so that dev_open()|efx_start_all() will actually start the device */
-	efx->state = STATE_RUNNING;
-
 	rc = efx_register_netdev(efx);
 	if (rc)
 		goto fail4;
@@ -2717,12 +2738,18 @@ static int efx_pm_freeze(struct device *dev)
 {
 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-	efx->state = STATE_FINI;
+	rtnl_lock();
 
-	netif_device_detach(efx->net_dev);
+	if (efx->state != STATE_DISABLED) {
+		efx->state = STATE_UNINIT;
 
-	efx_stop_all(efx);
-	efx_stop_interrupts(efx, false);
+		netif_device_detach(efx->net_dev);
+
+		efx_stop_all(efx);
+		efx_stop_interrupts(efx, false);
+	}
+
+	rtnl_unlock();
 
 	return 0;
 }
@@ -2731,21 +2758,25 @@ static int efx_pm_thaw(struct device *dev)
 {
 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-	efx->state = STATE_INIT;
+	rtnl_lock();
 
-	efx_start_interrupts(efx, false);
+	if (efx->state != STATE_DISABLED) {
+		efx_start_interrupts(efx, false);
 
-	mutex_lock(&efx->mac_lock);
-	efx->phy_op->reconfigure(efx);
-	mutex_unlock(&efx->mac_lock);
+		mutex_lock(&efx->mac_lock);
+		efx->phy_op->reconfigure(efx);
+		mutex_unlock(&efx->mac_lock);
 
-	efx_start_all(efx);
+		efx_start_all(efx);
 
-	netif_device_attach(efx->net_dev);
+		netif_device_attach(efx->net_dev);
 
-	efx->state = STATE_RUNNING;
+		efx->state = STATE_READY;
 
-	efx->type->resume_wol(efx);
+		efx->type->resume_wol(efx);
+	}
+
+	rtnl_unlock();
 
 	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
 	queue_work(reset_workqueue, &efx->reset_work);

+ 3 - 5
drivers/net/ethernet/sfc/ethtool.c

@@ -529,9 +529,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
 	if (!efx_tests)
 		goto fail;
 
-
-	ASSERT_RTNL();
-	if (efx->state != STATE_RUNNING) {
+	if (efx->state != STATE_READY) {
 		rc = -EIO;
 		goto fail1;
 	}
@@ -863,8 +861,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
 				       &ip_entry->ip4dst, &ip_entry->pdst);
 	if (rc != 0) {
 		rc = efx_filter_get_ipv4_full(
-			&spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
-			&ip_entry->ip4dst, &ip_entry->pdst);
+			&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
+			&ip_entry->ip4src, &ip_entry->psrc);
 		EFX_WARN_ON_PARANOID(rc);
 		ip_mask->ip4src = ~0;
 		ip_mask->psrc = ~0;

+ 1 - 1
drivers/net/ethernet/sfc/falcon_boards.c

@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
 		new_mode = PHY_MODE_SPECIAL;
 	if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
 		err = 0;
-	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+	} else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
 		err = -EBUSY;
 	} else {
 		/* Reset the PHY, reconfigure the MAC and enable/disable

+ 26 - 23
drivers/net/ethernet/sfc/net_driver.h

@@ -91,29 +91,31 @@ struct efx_special_buffer {
 };
 
 /**
- * struct efx_tx_buffer - An Efx TX buffer
- * @skb: The associated socket buffer.
- *	Set only on the final fragment of a packet; %NULL for all other
- *	fragments.  When this fragment completes, then we can free this
- *	skb.
- * @tsoh: The associated TSO header structure, or %NULL if this
- *	buffer is not a TSO header.
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ *	freed when descriptor completes
+ * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
+ *	freed when descriptor completes.
 * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
- * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if dma_unmap_single should be used.
 * @unmap_len: Length of this fragment to unmap
 */
struct efx_tx_buffer {
-	const struct sk_buff *skb;
-	struct efx_tso_header *tsoh;
+	union {
+		const struct sk_buff *skb;
+		void *heap_buf;
+	};
 	dma_addr_t dma_addr;
+	unsigned short flags;
 	unsigned short len;
-	bool continuation;
-	bool unmap_single;
 	unsigned short unmap_len;
 };
+#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
+#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
+#define EFX_TX_BUF_HEAP		4	/* buffer was allocated with kmalloc() */
+#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */
 
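The union plus flags word above collapses what used to be two pointers and two bools into one descriptor state. The completion path branches on the flags, along the lines of this sketch (it mirrors efx_dequeue_buffer() in the tx.c hunk further below; error handling omitted):

static void tx_buffer_complete_sketch(struct efx_tx_buffer *buf)
{
	if (buf->flags & EFX_TX_BUF_SKB)	/* last fragment of an skb */
		dev_kfree_skb_any((struct sk_buff *)buf->skb);
	else if (buf->flags & EFX_TX_BUF_HEAP)	/* kmalloc()ed TSO header */
		kfree(buf->heap_buf);
	buf->len = 0;				/* zero len marks the slot empty */
	buf->flags = 0;
}
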
 /**
  * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +135,7 @@ struct efx_tx_buffer {
  * @channel: The associated channel
  * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
+ * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
  * @initialised: Has hardware queue been initialised?
@@ -156,9 +159,6 @@ struct efx_tx_buffer {
  *	variable indicates that the queue is full.  This is to
  *	avoid cache-line ping-pong between the xmit path and the
  *	completion path.
- * @tso_headers_free: A list of TSO headers allocated for this TX queue
- *	that are not in use, and so available for new TSO sends. The list
- *	is protected by the TX queue lock.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
@@ -175,6 +175,7 @@ struct efx_tx_queue {
 	struct efx_channel *channel;
 	struct netdev_queue *core_txq;
 	struct efx_tx_buffer *buffer;
+	struct efx_buffer *tsoh_page;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
 	bool initialised;
@@ -187,7 +188,6 @@ struct efx_tx_queue {
 	unsigned int insert_count ____cacheline_aligned_in_smp;
 	unsigned int write_count;
 	unsigned int old_read_count;
-	struct efx_tso_header *tso_headers_free;
 	unsigned int tso_bursts;
 	unsigned int tso_long_headers;
 	unsigned int tso_packets;
@@ -430,11 +430,9 @@ enum efx_int_mode {
 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
 
 enum nic_state {
-	STATE_INIT = 0,
-	STATE_RUNNING = 1,
-	STATE_FINI = 2,
-	STATE_DISABLED = 3,
-	STATE_MAX,
+	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
+	STATE_READY = 1,	/* hardware ready and netdev registered */
+	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
 };
 
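The reduced state machine is small enough to summarise; the transitions below are read off the surrounding hunks (a sketch, not a normative contract):

/*
 *   probe/register:  STATE_UNINIT -> STATE_READY     (RTNL held)
 *   pm freeze:       STATE_READY  -> STATE_UNINIT    (RTNL held)
 *   pm thaw:         STATE_UNINIT -> STATE_READY     (RTNL held)
 *   fatal HW error:  any state    -> STATE_DISABLED  (one-way)
 */
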
 /*
@@ -654,7 +652,7 @@ struct vfdi_status;
  * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
  * @irq_rx_moderation: IRQ moderation time for RX event queues
  * @msg_enable: Log message enable flags
- * @state: Device state flag. Serialised by the rtnl_lock.
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
 * @reset_pending: Bitmask for pending resets
 * @tx_queue: TX DMA queues
 * @rx_queue: RX DMA queues
@@ -664,6 +662,8 @@ struct vfdi_status;
 *	should be allocated for this NIC
 * @rxq_entries: Size of receive queues requested by user.
 * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
 * @sram_lim_qw: Qword address limit of SRAM
@@ -774,6 +774,9 @@ struct efx_nic {
 
 	unsigned rxq_entries;
 	unsigned txq_entries;
+	unsigned int txq_stop_thresh;
+	unsigned int txq_wake_thresh;
+
 	unsigned tx_dc_base;
 	unsigned rx_dc_base;
 	unsigned sram_lim_qw;

+ 4 - 2
drivers/net/ethernet/sfc/nic.c

@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 /**************************************************************************
 *
 * Generic buffer handling
- * These buffers are used for interrupt status and MAC stats
+ * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/
 
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
 		++tx_queue->write_count;
 
 		/* Create TX descriptor ring entry */
+		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
 		EFX_POPULATE_QWORD_4(*txd,
-				     FSF_AZ_TX_KER_CONT, buffer->continuation,
+				     FSF_AZ_TX_KER_CONT,
+				     buffer->flags & EFX_TX_BUF_CONT,
 				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
 				     FSF_AZ_TX_KER_BUF_REGION, 0,
 				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
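
The BUILD_BUG_ON above is what lets the masked flags value feed the one-bit FSF_AZ_TX_KER_CONT field directly: with EFX_TX_BUF_CONT pinned to bit 0, the mask already yields 0 or 1. An illustrative check:

	unsigned short flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE; /* 1 | 8 */
	unsigned int cont = flags & EFX_TX_BUF_CONT;	/* == 1, a valid 1-bit field */

Had the flag been, say, bit 2, the expression would have needed a !!(...) normalisation before being written to the descriptor.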

+ 245 - 376
drivers/net/ethernet/sfc/tx.c

@@ -22,14 +22,6 @@
 #include "nic.h"
 #include "workarounds.h"
 
-/*
- * TX descriptor ring full threshold
- *
- * The tx_queue descriptor ring fill-level must fall below this value
- * before we restart the netif queue
- */
-#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer,
 			       unsigned int *pkts_compl,
@@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
 					 buffer->unmap_len);
-		if (buffer->unmap_single)
+		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
 			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
 					 DMA_TO_DEVICE);
 		else
 			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
 				       DMA_TO_DEVICE);
 		buffer->unmap_len = 0;
-		buffer->unmap_single = false;
 	}
 
-	if (buffer->skb) {
+	if (buffer->flags & EFX_TX_BUF_SKB) {
 		(*pkts_compl)++;
 		(*bytes_compl) += buffer->skb->len;
 		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
-		buffer->skb = NULL;
 		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
 			   "TX queue %d transmission id %x complete\n",
 			   tx_queue->queue, tx_queue->read_count);
+	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
+		kfree(buffer->heap_buf);
 	}
-}
 
- * struct efx_tso_header - a DMA mapped buffer for packet headers
- * @next: Linked list of free ones.
- *	The list is protected by the TX queue lock.
- * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
- * @dma_addr: The DMA address of the header below.
- *
- * This controls the memory used for a TSO header.  Use TSOH_DATA()
- * to find the packet header data.  Use TSOH_SIZE() to calculate the
- * total size required for a given packet header length.  TSO headers
- * in the free list are exactly %TSOH_STD_SIZE bytes in size.
- */
-struct efx_tso_header {
-	union {
-		struct efx_tso_header *next;
-		size_t unmap_len;
-	};
-	dma_addr_t dma_addr;
-};
+	buffer->len = 0;
+	buffer->flags = 0;
+}
 
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 			       struct sk_buff *skb);
-static void efx_fini_tso(struct efx_tx_queue *tx_queue);
-static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
-			       struct efx_tso_header *tsoh);
-
-static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
-			  struct efx_tx_buffer *buffer)
-{
-	if (buffer->tsoh) {
-		if (likely(!buffer->tsoh->unmap_len)) {
-			buffer->tsoh->next = tx_queue->tso_headers_free;
-			tx_queue->tso_headers_free = buffer->tsoh;
-		} else {
-			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
-		}
-		buffer->tsoh = NULL;
-	}
-}
-
 
 static inline unsigned
 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
 	return max_descs;
 }
 
+/* Get partner of a TX queue, seen as part of the same net core queue */
+static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
+	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+	else
+		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+	/* We need to consider both queues that the net core sees as one */
+	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+	struct efx_nic *efx = txq1->efx;
+	unsigned int fill_level;
+
+	fill_level = max(txq1->insert_count - txq1->old_read_count,
+			 txq2->insert_count - txq2->old_read_count);
+	if (likely(fill_level < efx->txq_stop_thresh))
+		return;
+
+	/* We used the stale old_read_count above, which gives us a
+	 * pessimistic estimate of the fill level (which may even
+	 * validly be >= efx->txq_entries).  Now try again using
+	 * read_count (more likely to be a cache miss).
+	 *
+	 * If we read read_count and then conditionally stop the
+	 * queue, it is possible for the completion path to race with
+	 * us and complete all outstanding descriptors in the middle,
+	 * after which there will be no more completions to wake it.
+	 * Therefore we stop the queue first, then read read_count
+	 * (with a memory barrier to ensure the ordering), then
+	 * restart the queue if the fill level turns out to be low
+	 * enough.
+	 */
+	netif_tx_stop_queue(txq1->core_txq);
+	smp_mb();
+	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+
+	fill_level = max(txq1->insert_count - txq1->old_read_count,
+			 txq2->insert_count - txq2->old_read_count);
+	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+	if (likely(fill_level < efx->txq_stop_thresh)) {
+		smp_mb();
+		if (likely(!efx->loopback_selftest))
+			netif_tx_start_queue(txq1->core_txq);
+	}
+}
+
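
efx_tx_maybe_stop_queue() above is the stop-first-then-recheck idiom; the ordering is the whole point, so here is its skeleton in isolation (illustrative variable names):

	netif_tx_stop_queue(txq);		/* 1: stop before re-reading */
	smp_mb();				/* 2: stop visible before read */
	old_read = ACCESS_ONCE(read_count);	/* 3: refresh the stale copy */
	if (insert_count - old_read < stop_thresh)
		netif_tx_start_queue(txq);	/* 4: raced, space reappeared */

Reading first and stopping second would allow the completion path to drain every outstanding descriptor in between, leaving no later completion to wake the queue.
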
 /*
  * Add a socket buffer to a TX queue
  *
@@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
  * This function is split out from efx_hard_start_xmit to allow the
  * loopback test to direct packets via specific TX queues.
  *
- * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
+ * Returns NETDEV_TX_OK.
  * You must hold netif_tx_lock() to call this function.
  */
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
-	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
+	unsigned int len, unmap_len = 0, insert_ptr;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
-	bool unmap_single;
-	int q_space, i = 0;
-	netdev_tx_t rc = NETDEV_TX_OK;
+	unsigned short dma_flags;
+	int i = 0;
 
 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
@@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 			return NETDEV_TX_OK;
 	}
 
-	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	q_space = efx->txq_entries - 1 - fill_level;
-
 	/* Map for DMA.  Use dma_map_single rather than dma_map_page
 	 * since this is more efficient on machines with sparse
 	 * memory.
 	 */
-	unmap_single = true;
+	dma_flags = EFX_TX_BUF_MAP_SINGLE;
 	dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
 
 	/* Process all fragments */
@@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 		/* Add to TX queue, splitting across DMA boundaries */
 		do {
-			if (unlikely(q_space-- <= 0)) {
-				/* It might be that completions have
-				 * happened since the xmit path last
-				 * checked.  Update the xmit path's
-				 * copy of read_count.
-				 */
-				netif_tx_stop_queue(tx_queue->core_txq);
-				/* This memory barrier protects the
-				 * change of queue state from the access
-				 * of read_count. */
-				smp_mb();
-				tx_queue->old_read_count =
-					ACCESS_ONCE(tx_queue->read_count);
-				fill_level = (tx_queue->insert_count
-					      - tx_queue->old_read_count);
-				q_space = efx->txq_entries - 1 - fill_level;
-				if (unlikely(q_space-- <= 0)) {
-					rc = NETDEV_TX_BUSY;
-					goto unwind;
-				}
-				smp_mb();
-				if (likely(!efx->loopback_selftest))
-					netif_tx_start_queue(
-						tx_queue->core_txq);
-			}
-
 			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 			buffer = &tx_queue->buffer[insert_ptr];
-			efx_tsoh_free(tx_queue, buffer);
-			EFX_BUG_ON_PARANOID(buffer->tsoh);
-			EFX_BUG_ON_PARANOID(buffer->skb);
+			EFX_BUG_ON_PARANOID(buffer->flags);
 			EFX_BUG_ON_PARANOID(buffer->len);
-			EFX_BUG_ON_PARANOID(!buffer->continuation);
 			EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
 			dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 			/* Fill out per descriptor fields */
 			buffer->len = dma_len;
 			buffer->dma_addr = dma_addr;
+			buffer->flags = EFX_TX_BUF_CONT;
 			len -= dma_len;
 			dma_addr += dma_len;
 			++tx_queue->insert_count;
 		} while (len);
 
 		/* Transfer ownership of the unmapping to the final buffer */
-		buffer->unmap_single = unmap_single;
+		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
 		buffer->unmap_len = unmap_len;
 		unmap_len = 0;
 
@@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		len = skb_frag_size(fragment);
 		i++;
 		/* Map for DMA */
-		unmap_single = false;
+		dma_flags = 0;
 		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
 					    DMA_TO_DEVICE);
 	}
 
 	/* Transfer ownership of the skb to the final buffer */
 	buffer->skb = skb;
-	buffer->continuation = false;
+	buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
 	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
 	/* Pass off to hardware */
 	efx_nic_push_buffers(tx_queue);
 
+	efx_tx_maybe_stop_queue(tx_queue);
+
 	return NETDEV_TX_OK;
 
 dma_err:
@@ -289,7 +266,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Mark the packet as transmitted, and free the SKB ourselves */
 	dev_kfree_skb_any(skb);
 
- unwind:
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-		buffer->len = 0;
 	}
 
 	/* Free the fragment we were mid-way through pushing */
 	if (unmap_len) {
-		if (unmap_single)
+		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
 			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
 					 DMA_TO_DEVICE);
 		else
@@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 				       DMA_TO_DEVICE);
 	}
 
-	return rc;
+	return NETDEV_TX_OK;
 }
 
 /* Remove packets from the TX queue
@@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
 		}
 
 		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-		buffer->continuation = true;
-		buffer->len = 0;
 
 		++tx_queue->read_count;
 		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -450,6 +423,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
+	struct efx_tx_queue *txq2;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -457,15 +431,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
 	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
-	/* See if we need to restart the netif queue.  This barrier
-	 * separates the update of read_count from the test of the
-	 * queue state. */
+	/* See if we need to restart the netif queue.  This memory
+	 * barrier ensures that we write read_count (inside
+	 * efx_dequeue_buffers()) before reading the queue status.
+	 */
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
 	    likely(efx->port_enabled) &&
 	    likely(netif_device_present(efx->net_dev))) {
-		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_TXQ_THRESHOLD(efx))
+		txq2 = efx_tx_queue_partner(tx_queue);
+		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+				 txq2->insert_count - txq2->read_count);
+		if (fill_level <= efx->txq_wake_thresh)
 			netif_tx_wake_queue(tx_queue->core_txq);
 	}
 
@@ -480,11 +457,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	}
 }
 
+/* Size of page-based TSO header buffers.  Larger blocks must be
+ * allocated from the heap.
+ */
+#define TSOH_STD_SIZE	128
+#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
+
+/* At most half the descriptors in the queue at any time will refer to
+ * a TSO header buffer, since they must always be followed by a
+ * payload descriptor referring to an skb.
+ */
+static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
+{
+	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
+}
+
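A worked example of the sizing above, under the assumption that PAGE_SIZE is 4096:

/* TSOH_PER_PAGE       = 4096 / 128 = 32 headers per page
 * 1024-entry ring     -> at most 512 header descriptors in flight
 * efx_tsoh_page_count = DIV_ROUND_UP(1024, 2 * 32) = 16 pages
 */
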
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int entries;
-	int i, rc;
+	int rc;
 
 	/* Create the smallest power-of-two aligned ring */
 	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,17 +492,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 				   GFP_KERNEL);
 	if (!tx_queue->buffer)
 		return -ENOMEM;
-	for (i = 0; i <= tx_queue->ptr_mask; ++i)
-		tx_queue->buffer[i].continuation = true;
+
+	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
+		tx_queue->tsoh_page =
+			kcalloc(efx_tsoh_page_count(tx_queue),
+				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
+		if (!tx_queue->tsoh_page) {
+			rc = -ENOMEM;
+			goto fail1;
+		}
+	}
 
 	/* Allocate hardware ring */
 	rc = efx_nic_probe_tx(tx_queue);
 	if (rc)
-		goto fail;
+		goto fail2;
 
 	return 0;
 
- fail:
+fail2:
+	kfree(tx_queue->tsoh_page);
+	tx_queue->tsoh_page = NULL;
+fail1:
 	kfree(tx_queue->buffer);
 	tx_queue->buffer = NULL;
 	return rc;
@@ -546,8 +549,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 		unsigned int pkts_compl = 0, bytes_compl = 0;
 		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-		buffer->continuation = true;
-		buffer->len = 0;
 
 		++tx_queue->read_count;
 	}
@@ -568,13 +569,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 	efx_nic_fini_tx(tx_queue);
 
 	efx_release_tx_buffers(tx_queue);
-
-	/* Free up TSO header cache */
-	efx_fini_tso(tx_queue);
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	int i;
+
 	if (!tx_queue->buffer)
 		return;
 
@@ -582,6 +582,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 		  "destroying TX queue %d\n", tx_queue->queue);
 		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
 	efx_nic_remove_tx(tx_queue);
 
 
+	if (tx_queue->tsoh_page) {
+		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
+			efx_nic_free_buffer(tx_queue->efx,
+					    &tx_queue->tsoh_page[i]);
+		kfree(tx_queue->tsoh_page);
+		tx_queue->tsoh_page = NULL;
+	}
+
 	kfree(tx_queue->buffer);
 	kfree(tx_queue->buffer);
 	tx_queue->buffer = NULL;
 	tx_queue->buffer = NULL;
 }
 }
@@ -604,22 +612,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 #define TSOH_OFFSET	NET_IP_ALIGN
 #endif
 
-#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)
-
-/* Total size of struct efx_tso_header, buffer and padding */
-#define TSOH_SIZE(hdr_len)					\
-	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
-
-/* Size of blocks on free list.  Larger blocks must be allocated from
- * the heap.
- */
-#define TSOH_STD_SIZE		128
-
 #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
-#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
-#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
-#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
-#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
 
 /**
  * struct tso_state - TSO state for an SKB
@@ -631,10 +624,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
- * @unmap_single: DMA single vs page mapping flag
+ * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
+ * @ip_off: Offset of IP header
+ * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
- * @full_packet_size: Number of bytes to put in each outgoing segment
+ * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
@@ -651,11 +646,13 @@ struct tso_state {
 	unsigned in_len;
 	unsigned unmap_len;
 	dma_addr_t unmap_addr;
-	bool unmap_single;
+	unsigned short dma_flags;
 
 	__be16 protocol;
+	unsigned int ip_off;
+	unsigned int tcp_off;
 	unsigned header_len;
-	int full_packet_size;
+	unsigned int ip_base_len;
 };
 
 
@@ -687,91 +684,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
 	return protocol;
 }
 
-
-/*
- * Allocate a page worth of efx_tso_header structures, and string them
- * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
- */
-static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
+			       struct efx_tx_buffer *buffer, unsigned int len)
 {
-	struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-	struct efx_tso_header *tsoh;
-	dma_addr_t dma_addr;
-	u8 *base_kva, *kva;
-
-	base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
-	if (base_kva == NULL) {
-		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
-			  "Unable to allocate page for TSO headers\n");
-		return -ENOMEM;
-	}
-
-	/* dma_alloc_coherent() allocates pages. */
-	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
-
-	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
-		tsoh = (struct efx_tso_header *)kva;
-		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
-		tsoh->next = tx_queue->tso_headers_free;
-		tx_queue->tso_headers_free = tsoh;
-	}
-
-	return 0;
-}
+	u8 *result;
 
+	EFX_BUG_ON_PARANOID(buffer->len);
+	EFX_BUG_ON_PARANOID(buffer->flags);
+	EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
-/* Free up a TSO header, and all others in the same page. */
-static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
-				struct efx_tso_header *tsoh,
-				struct device *dma_dev)
-{
-	struct efx_tso_header **p;
-	unsigned long base_kva;
-	dma_addr_t base_dma;
-
-	base_kva = (unsigned long)tsoh & PAGE_MASK;
-	base_dma = tsoh->dma_addr & PAGE_MASK;
-
-	p = &tx_queue->tso_headers_free;
-	while (*p != NULL) {
-		if (((unsigned long)*p & PAGE_MASK) == base_kva)
-			*p = (*p)->next;
-		else
-			p = &(*p)->next;
-	}
-
-	dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
-}
+	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+		unsigned index =
+			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
+		struct efx_buffer *page_buf =
+			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
+		unsigned offset =
+			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+
+		if (unlikely(!page_buf->addr) &&
+		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+			return NULL;
+
+		result = (u8 *)page_buf->addr + offset;
+		buffer->dma_addr = page_buf->dma_addr + offset;
+		buffer->flags = EFX_TX_BUF_CONT;
+	} else {
+		tx_queue->tso_long_headers++;
 
-static struct efx_tso_header *
-efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
-{
-	struct efx_tso_header *tsoh;
-
-	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
-	if (unlikely(!tsoh))
-		return NULL;
-
-	tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
-					TSOH_BUFFER(tsoh), header_len,
-					DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
-				       tsoh->dma_addr))) {
-		kfree(tsoh);
-		return NULL;
+		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+		if (unlikely(!buffer->heap_buf))
+			return NULL;
+		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
 	}
 	}
 
-	tsoh->unmap_len = header_len;
-	return tsoh;
-}
+	buffer->len = len;
 
-efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
-{
-	dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-			 tsoh->dma_addr, tsoh->unmap_len,
-			 DMA_TO_DEVICE);
-	kfree(tsoh);
+	return result;
 }
 }
 
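The slot arithmetic in efx_tsoh_get_buffer() above deserves a concrete instance (assumed values; TSOH_OFFSET depends on NET_IP_ALIGN):

/* With TSOH_STD_SIZE 128, TSOH_PER_PAGE 32, ptr_mask 511 and
 * insert_count 75:
 *   index  = (75 & 511) / 2               = 37
 *   page   = tsoh_page[37 / 32]           = tsoh_page[1]
 *   offset = 128 * (37 % 32) + TSOH_OFFSET = 640 + TSOH_OFFSET
 * Halving the insert pointer is safe because a header descriptor is
 * always followed by at least one payload descriptor, so headers can
 * occupy at most every other ring slot.
 */
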
 /**
@@ -781,47 +730,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
- * @tx_queue full.
+ * Push descriptors onto the TX queue.
  */
  */
-static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
-			       dma_addr_t dma_addr, unsigned len,
-			       struct efx_tx_buffer **final_buffer)
+static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+				dma_addr_t dma_addr, unsigned len,
+				struct efx_tx_buffer **final_buffer)
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned dma_len, fill_level, insert_ptr;
-	int q_space;
+	unsigned dma_len, insert_ptr;
 
 	EFX_BUG_ON_PARANOID(len <= 0);
 
-	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	/* -1 as there is no way to represent all descriptors used */
-	q_space = efx->txq_entries - 1 - fill_level;
-
 	while (1) {
-		if (unlikely(q_space-- <= 0)) {
-			/* It might be that completions have happened
-			 * since the xmit path last checked.  Update
-			 * the xmit path's copy of read_count.
-			 */
-			netif_tx_stop_queue(tx_queue->core_txq);
-			/* This memory barrier protects the change of
-			 * queue state from the access of read_count. */
-			smp_mb();
-			tx_queue->old_read_count =
-				ACCESS_ONCE(tx_queue->read_count);
-			fill_level = (tx_queue->insert_count
-				      - tx_queue->old_read_count);
-			q_space = efx->txq_entries - 1 - fill_level;
-			if (unlikely(q_space-- <= 0)) {
-				*final_buffer = NULL;
-				return 1;
-			}
-			smp_mb();
-			netif_tx_start_queue(tx_queue->core_txq);
-		}
-
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
 		buffer = &tx_queue->buffer[insert_ptr];
 		++tx_queue->insert_count;
@@ -830,12 +751,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 				    tx_queue->read_count >=
 				    efx->txq_entries);
 
-		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->len);
 		EFX_BUG_ON_PARANOID(buffer->unmap_len);
-		EFX_BUG_ON_PARANOID(buffer->skb);
-		EFX_BUG_ON_PARANOID(!buffer->continuation);
-		EFX_BUG_ON_PARANOID(buffer->tsoh);
+		EFX_BUG_ON_PARANOID(buffer->flags);
 
 		buffer->dma_addr = dma_addr;
 
@@ -845,7 +763,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 		if (dma_len >= len)
 			break;
 
-		buffer->len = dma_len; /* Don't set the other members */
+		buffer->len = dma_len;
+		buffer->flags = EFX_TX_BUF_CONT;
 		dma_addr += dma_len;
 		len -= dma_len;
 	}
@@ -853,7 +772,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 	EFX_BUG_ON_PARANOID(!len);
 	buffer->len = len;
 	*final_buffer = buffer;
-	return 0;
 }
 
 
@@ -864,54 +782,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
-static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
-			       struct efx_tso_header *tsoh, unsigned len)
+static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
+			      struct efx_tx_buffer *buffer, u8 *header)
 {
-	struct efx_tx_buffer *buffer;
-
-	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
-	efx_tsoh_free(tx_queue, buffer);
-	EFX_BUG_ON_PARANOID(buffer->len);
-	EFX_BUG_ON_PARANOID(buffer->unmap_len);
-	EFX_BUG_ON_PARANOID(buffer->skb);
-	EFX_BUG_ON_PARANOID(!buffer->continuation);
-	EFX_BUG_ON_PARANOID(buffer->tsoh);
-	buffer->len = len;
-	buffer->dma_addr = tsoh->dma_addr;
-	buffer->tsoh = tsoh;
+	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
+		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
+						  header, buffer->len,
+						  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+					       buffer->dma_addr))) {
+			kfree(buffer->heap_buf);
+			buffer->len = 0;
+			buffer->flags = 0;
+			return -ENOMEM;
+		}
+		buffer->unmap_len = buffer->len;
+		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
+	}
 
 	++tx_queue->insert_count;
+	return 0;
 }
 
 
-/* Remove descriptors put into a tx_queue. */
+/* Remove buffers put into a tx_queue.  None of the buffers must have
+ * an skb attached.
+ */
 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 {
 	struct efx_tx_buffer *buffer;
-	dma_addr_t unmap_addr;
 
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
 		buffer = &tx_queue->buffer[tx_queue->insert_count &
 					   tx_queue->ptr_mask];
-		efx_tsoh_free(tx_queue, buffer);
-		EFX_BUG_ON_PARANOID(buffer->skb);
-		if (buffer->unmap_len) {
-			unmap_addr = (buffer->dma_addr + buffer->len -
-				      buffer->unmap_len);
-			if (buffer->unmap_single)
-				dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-						 unmap_addr, buffer->unmap_len,
-						 DMA_TO_DEVICE);
-			else
-				dma_unmap_page(&tx_queue->efx->pci_dev->dev,
-					       unmap_addr, buffer->unmap_len,
-					       DMA_TO_DEVICE);
-			buffer->unmap_len = 0;
-		}
-		buffer->len = 0;
-		buffer->continuation = true;
+		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
 	}
 }
 
@@ -919,17 +825,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 /* Parse the SKB header and initialise state. */
 static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 {
-	/* All ethernet/IP/TCP headers combined size is TCP header size
-	 * plus offset of TCP header relative to start of packet.
-	 */
-	st->header_len = ((tcp_hdr(skb)->doff << 2u)
-			  + PTR_DIFF(tcp_hdr(skb), skb->data));
-	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
-
-	if (st->protocol == htons(ETH_P_IP))
+	st->ip_off = skb_network_header(skb) - skb->data;
+	st->tcp_off = skb_transport_header(skb) - skb->data;
+	st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+	if (st->protocol == htons(ETH_P_IP)) {
+		st->ip_base_len = st->header_len - st->ip_off;
 		st->ipv4_id = ntohs(ip_hdr(skb)->id);
-	else
+	} else {
+		st->ip_base_len = st->header_len - st->tcp_off;
 		st->ipv4_id = 0;
+	}
 	st->seqnum = ntohl(tcp_hdr(skb)->seq);
 
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
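
tso_start() above precomputes ip_base_len so that each segment's length field is just ip_base_len + packet_space. A worked example under assumed header sizes (14-byte Ethernet, 20-byte IPv4 or 40-byte IPv6, 20-byte TCP):

/*   IPv4: ip_off 14, tcp_off 34, header_len 54 -> ip_base_len 40
 *         tot_len     = 40 + packet_space
 *   IPv6: ip_off 14, tcp_off 54, header_len 74 -> ip_base_len 20
 *         payload_len = 20 + packet_space  (fixed IPv6 header excluded)
 */
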
@@ -938,7 +843,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 
 	st->out_len = skb->len - st->header_len;
 	st->unmap_len = 0;
-	st->unmap_single = false;
+	st->dma_flags = 0;
 }
 
 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -947,7 +852,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
 	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
 					  skb_frag_size(frag), DMA_TO_DEVICE);
 	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-		st->unmap_single = false;
+		st->dma_flags = 0;
 		st->unmap_len = skb_frag_size(frag);
 		st->in_len = skb_frag_size(frag);
 		st->dma_addr = st->unmap_addr;
@@ -965,7 +870,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
 	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
 					len, DMA_TO_DEVICE);
 	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-		st->unmap_single = true;
+		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
 		st->unmap_len = len;
 		st->in_len = len;
 		st->dma_addr = st->unmap_addr;
@@ -982,20 +887,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
- * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
- * space in @tx_queue.
+ * of fragment or end-of-packet.
 */
-static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
-					 const struct sk_buff *skb,
-					 struct tso_state *st)
+static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+					  const struct sk_buff *skb,
+					  struct tso_state *st)
 {
 	struct efx_tx_buffer *buffer;
-	int n, end_of_packet, rc;
+	int n;
 
 
 	if (st->in_len == 0)
 	if (st->in_len == 0)
-		return 0;
+		return;
 	if (st->packet_space == 0)
 	if (st->packet_space == 0)
-		return 0;
+		return;
 
 
 	EFX_BUG_ON_PARANOID(st->in_len <= 0);
 	EFX_BUG_ON_PARANOID(st->in_len <= 0);
 	EFX_BUG_ON_PARANOID(st->packet_space <= 0);
 	EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -1006,25 +910,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 	st->out_len -= n;
 	st->out_len -= n;
 	st->in_len -= n;
 	st->in_len -= n;
 
 
-	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
-	if (likely(rc == 0)) {
-		if (st->out_len == 0)
-			/* Transfer ownership of the skb */
-			buffer->skb = skb;
+	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
 
 
-		end_of_packet = st->out_len == 0 || st->packet_space == 0;
-		buffer->continuation = !end_of_packet;
+	if (st->out_len == 0) {
+		/* Transfer ownership of the skb */
+		buffer->skb = skb;
+		buffer->flags = EFX_TX_BUF_SKB;
+	} else if (st->packet_space != 0) {
+		buffer->flags = EFX_TX_BUF_CONT;
+	}
 
 
-		if (st->in_len == 0) {
-			/* Transfer ownership of the DMA mapping */
-			buffer->unmap_len = st->unmap_len;
-			buffer->unmap_single = st->unmap_single;
-			st->unmap_len = 0;
-		}
+	if (st->in_len == 0) {
+		/* Transfer ownership of the DMA mapping */
+		buffer->unmap_len = st->unmap_len;
+		buffer->flags |= st->dma_flags;
+		st->unmap_len = 0;
 	}
 	}
 
 
 	st->dma_addr += n;
 	st->dma_addr += n;
-	return rc;
 }
 }
 
 
 
 
@@ -1035,36 +938,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
- * success, or -1 if failed to alloc header.
+ * success, or -%ENOMEM if failed to alloc header.
 */
 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 				const struct sk_buff *skb,
 				struct tso_state *st)
 {
-	struct efx_tso_header *tsoh;
+	struct efx_tx_buffer *buffer =
+		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
 	struct tcphdr *tsoh_th;
 	unsigned ip_length;
 	u8 *header;
+	int rc;
 
-	/* Allocate a DMA-mapped header buffer. */
-	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
-		if (tx_queue->tso_headers_free == NULL) {
-			if (efx_tsoh_block_alloc(tx_queue))
-				return -1;
-		}
-		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
-		tsoh = tx_queue->tso_headers_free;
-		tx_queue->tso_headers_free = tsoh->next;
-		tsoh->unmap_len = 0;
-	} else {
-		tx_queue->tso_long_headers++;
-		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
-		if (unlikely(!tsoh))
-			return -1;
-	}
+	/* Allocate and insert a DMA-mapped header buffer. */
+	header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+	if (!header)
+		return -ENOMEM;
 
 
-	header = TSOH_BUFFER(tsoh);
-	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+	tsoh_th = (struct tcphdr *)(header + st->tcp_off);
 
 
 	/* Copy and update the headers. */
 	/* Copy and update the headers. */
 	memcpy(header, skb->data, st->header_len);
 	memcpy(header, skb->data, st->header_len);
@@ -1073,19 +965,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 	st->seqnum += skb_shinfo(skb)->gso_size;
 	st->seqnum += skb_shinfo(skb)->gso_size;
 	if (st->out_len > skb_shinfo(skb)->gso_size) {
 	if (st->out_len > skb_shinfo(skb)->gso_size) {
 		/* This packet will not finish the TSO burst. */
 		/* This packet will not finish the TSO burst. */
-		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
+		st->packet_space = skb_shinfo(skb)->gso_size;
 		tsoh_th->fin = 0;
 		tsoh_th->fin = 0;
 		tsoh_th->psh = 0;
 		tsoh_th->psh = 0;
 	} else {
 	} else {
 		/* This packet will be the last in the TSO burst. */
 		/* This packet will be the last in the TSO burst. */
-		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
+		st->packet_space = st->out_len;
 		tsoh_th->fin = tcp_hdr(skb)->fin;
 		tsoh_th->fin = tcp_hdr(skb)->fin;
 		tsoh_th->psh = tcp_hdr(skb)->psh;
 		tsoh_th->psh = tcp_hdr(skb)->psh;
 	}
 	}
+	ip_length = st->ip_base_len + st->packet_space;
 
 
 	if (st->protocol == htons(ETH_P_IP)) {
 	if (st->protocol == htons(ETH_P_IP)) {
-		struct iphdr *tsoh_iph =
-			(struct iphdr *)(header + SKB_IPV4_OFF(skb));
+		struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
 
 
 		tsoh_iph->tot_len = htons(ip_length);
 		tsoh_iph->tot_len = htons(ip_length);
 
 
@@ -1094,16 +986,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 		st->ipv4_id++;
 		st->ipv4_id++;
 	} else {
 	} else {
 		struct ipv6hdr *tsoh_iph =
 		struct ipv6hdr *tsoh_iph =
-			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
+			(struct ipv6hdr *)(header + st->ip_off);
 
 
-		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
+		tsoh_iph->payload_len = htons(ip_length);
 	}
 	}
 
 
-	st->packet_space = skb_shinfo(skb)->gso_size;
-	++tx_queue->tso_packets;
+	rc = efx_tso_put_header(tx_queue, buffer, header);
+	if (unlikely(rc))
+		return rc;
 
 
-	/* Form a descriptor for this header. */
-	efx_tso_put_header(tx_queue, tsoh, st->header_len);
+	++tx_queue->tso_packets;
 
 
 	return 0;
 	return 0;
 }
 }
@@ -1118,13 +1010,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
  *
  *
  * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
  * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
  * @skb was not enqueued.  In all cases @skb is consumed.  Return
  * @skb was not enqueued.  In all cases @skb is consumed.  Return
- * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ * %NETDEV_TX_OK.
  */
  */
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 			       struct sk_buff *skb)
 			       struct sk_buff *skb)
 {
 {
 	struct efx_nic *efx = tx_queue->efx;
 	struct efx_nic *efx = tx_queue->efx;
-	int frag_i, rc, rc2 = NETDEV_TX_OK;
+	int frag_i, rc;
 	struct tso_state state;
 	struct tso_state state;
 
 
 	/* Find the packet protocol and sanity-check it */
 	/* Find the packet protocol and sanity-check it */
@@ -1156,11 +1048,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 		goto mem_err;
 		goto mem_err;
 
 
 	while (1) {
 	while (1) {
-		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-		if (unlikely(rc)) {
-			rc2 = NETDEV_TX_BUSY;
-			goto unwind;
-		}
+		tso_fill_packet_with_fragment(tx_queue, skb, &state);
 
 
 		/* Move onto the next fragment? */
 		/* Move onto the next fragment? */
 		if (state.in_len == 0) {
 		if (state.in_len == 0) {
@@ -1184,6 +1072,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	/* Pass off to hardware */
 	/* Pass off to hardware */
 	efx_nic_push_buffers(tx_queue);
 	efx_nic_push_buffers(tx_queue);
 
 
+	efx_tx_maybe_stop_queue(tx_queue);
+
 	tx_queue->tso_bursts++;
 	tx_queue->tso_bursts++;
 	return NETDEV_TX_OK;
 	return NETDEV_TX_OK;
 
 
@@ -1192,10 +1082,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 		  "Out of memory for TSO headers, or DMA mapping error\n");
 		  "Out of memory for TSO headers, or DMA mapping error\n");
 	dev_kfree_skb_any(skb);
 	dev_kfree_skb_any(skb);
 
 
- unwind:
 	/* Free the DMA mapping we were in the process of writing out */
 	/* Free the DMA mapping we were in the process of writing out */
 	if (state.unmap_len) {
 	if (state.unmap_len) {
-		if (state.unmap_single)
+		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
 			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
 			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
 					 state.unmap_len, DMA_TO_DEVICE);
 					 state.unmap_len, DMA_TO_DEVICE);
 		else
 		else
@@ -1204,25 +1093,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	}
 	}
 
 
 	efx_enqueue_unwind(tx_queue);
 	efx_enqueue_unwind(tx_queue);
-	return rc2;
-}
-
-
-/*
- * Free up all TSO datastructures associated with tx_queue. This
- * routine should be called only once the tx_queue is both empty and
- * will no longer be used.
- */
-static void efx_fini_tso(struct efx_tx_queue *tx_queue)
-{
-	unsigned i;
-
-	if (tx_queue->buffer) {
-		for (i = 0; i <= tx_queue->ptr_mask; ++i)
-			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
-	}
-
-	while (tx_queue->tso_headers_free != NULL)
-		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-				    &tx_queue->efx->pci_dev->dev);
+	return NETDEV_TX_OK;
 }
 }
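
The rework above makes tso_fill_packet_with_fragment() void: the rewritten path reserves ring space for a worst-case packet before filling begins, so descriptor insertion can no longer fail mid-packet, and back-pressure moves to efx_tx_maybe_stop_queue() after the buffers are pushed. A minimal stand-alone sketch of that control flow, with simplified types and an illustrative 1448-byte segment size in place of the driver's real state:

#include <stdio.h>

/* Simplified stand-in for the driver's TSO state; the fields and the
 * segment size below are illustrative, not the driver's values. */
struct tso_state {
	int in_len;		/* bytes left in the current fragment */
	int packet_space;	/* bytes left in the current segment */
};

/* Cannot fail: the caller reserved ring space up front, so there is
 * no return code and no mid-packet unwind path. */
static void fill_packet_with_fragment(struct tso_state *st)
{
	int n;

	if (st->in_len == 0 || st->packet_space == 0)
		return;

	n = st->in_len < st->packet_space ? st->in_len : st->packet_space;
	st->packet_space -= n;
	st->in_len -= n;
	printf("queued %d byte descriptor\n", n);
}

int main(void)
{
	struct tso_state st = { .in_len = 3000, .packet_space = 1448 };

	while (st.in_len) {
		fill_packet_with_fragment(&st);
		if (st.packet_space == 0)
			st.packet_space = 1448;	/* next TSO segment */
	}
	return 0;
}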

+ 5 - 0
drivers/net/ethernet/stmicro/stmmac/common.h

@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
+
+#endif /* __COMMON_H__ */
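
This and the following stmmac header hunks add the same standard include-guard idiom, which lets a header be included any number of times while its declarations are compiled only once. The general shape (file and symbol names illustrative):

/* example.h -- pick a unique guard symbol per header */
#ifndef __EXAMPLE_H__
#define __EXAMPLE_H__

int example_fn(void);	/* declarations live inside the guard */

#endif /* __EXAMPLE_H__ */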

+ 6 - 0
drivers/net/ethernet/stmicro/stmmac/descs.h

@@ -20,6 +20,10 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
 struct dma_desc {
 	/* Receive descriptor */
 	union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
 					 * is not calculated */
 	cic_full = 3,		/* IP header and pseudoheader */
 };
+
+#endif /* __DESCS_H__ */

+ 5 - 0
drivers/net/ethernet/stmicro/stmmac/descs_com.h

@@ -27,6 +27,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
 #if defined(CONFIG_STMMAC_RING)
 static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
 {
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
 	p->des01.tx.buffer1_size = len;
 }
 #endif
+
+#endif /* __DESC_COM_H__ */

+ 5 - 0
drivers/net/ethernet/stmicro/stmmac/dwmac100.h

@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
 #include <linux/phy.h>
 #include "common.h"
 
@@ -119,3 +122,5 @@ enum ttc_control {
 #define DMA_MISSED_FRAME_M_CNTR	0x0000ffff	/* Missed Frame Counter */
 
 extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */

+ 4 - 1
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h

@@ -19,6 +19,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
 
 #include <linux/phy.h>
 #include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
 #define GMAC_MMC_RX_CSUM_OFFLOAD   0x208
 
 /* Synopsys Core versions */
-#define	DWMAC_CORE_3_40	34
+#define	DWMAC_CORE_3_40	0x34
 
 extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */

+ 5 - 0
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h

@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
 /* DMA CRS Control and Status Register Mapping */
 #define DMA_BUS_MODE		0x00001000	/* Bus Mode */
 #define DMA_XMT_POLL_DEMAND	0x00001004	/* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
 extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
 extern int dwmac_dma_interrupt(void __iomem *ioaddr,
 				struct stmmac_extra_stats *x);
+
+#endif /* __DWMAC_DMA_H__ */

+ 5 - 0
drivers/net/ethernet/stmicro/stmmac/mmc.h

@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __MMC_H__
+#define __MMC_H__
+
 /* MMC control register */
 /* When set, all counters are reset */
 #define MMC_CNTRL_COUNTER_RESET		0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
 extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
 extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
 extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+
+#endif /* __MMC_H__ */

+ 3 - 3
drivers/net/ethernet/stmicro/stmmac/mmc_core.c

@@ -33,7 +33,7 @@
 #define MMC_TX_INTR		0x00000108	/* MMC TX Interrupt */
 #define MMC_RX_INTR_MASK	0x0000010c	/* MMC Interrupt Mask */
 #define MMC_TX_INTR_MASK	0x00000110	/* MMC Interrupt Mask */
-#define MMC_DEFAUL_MASK		0xffffffff
+#define MMC_DEFAULT_MASK		0xffffffff
 
 /* MMC TX counter registers */
 
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
 /* To mask all interrupts. */
 void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 {
-	writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
-	writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actually supported).

+ 5 - 0
drivers/net/ethernet/stmicro/stmmac/stmmac.h

@@ -20,6 +20,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
 #define DRV_MODULE_VERSION	"March_2012"
 
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
 {
 }
 #endif /* CONFIG_STMMAC_PCI */
+
+#endif /* __STMMAC_H__ */

+ 6 - 5
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c

@@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
 	new_bus->write = &stmmac_mdio_write;
 	new_bus->reset = &stmmac_mdio_reset;
 	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-		new_bus->name, mdio_bus_data->bus_id);
+		new_bus->name, priv->plat->bus_id);
 	new_bus->priv = ndev;
 	new_bus->irq = irqlist;
 	new_bus->phy_mask = mdio_bus_data->phy_mask;
@@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev)
 			 * and no PHY number was provided to the MAC,
 			 * use the one probed here.
 			 */
-			if ((priv->plat->bus_id == mdio_bus_data->bus_id) &&
-			    (priv->plat->phy_addr == -1))
+			if (priv->plat->phy_addr == -1)
 				priv->plat->phy_addr = addr;
 
-			act = (priv->plat->bus_id == mdio_bus_data->bus_id) &&
-				(priv->plat->phy_addr == addr);
+			act = (priv->plat->phy_addr == addr);
 			switch (phydev->irq) {
 			case PHY_POLL:
 				irq_str = "POLL";
@@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev)
 {
 	struct stmmac_priv *priv = netdev_priv(ndev);
 
+	if (!priv->mii)
+		return 0;
+
 	mdiobus_unregister(priv->mii);
 	priv->mii->priv = NULL;
 	mdiobus_free(priv->mii);

+ 0 - 1
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c

@@ -40,7 +40,6 @@ static void stmmac_default_data(void)
 	plat_dat.has_gmac = 1;
 	plat_dat.force_sf_dma_mode = 1;
 
-	mdio_data.bus_id = 1;
 	mdio_data.phy_reset = NULL;
 	mdio_data.phy_mask = 0;
 	plat_dat.mdio_bus_data = &mdio_data;

+ 8 - 31
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c

@@ -78,6 +78,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 {
 	int ret = 0;
 	struct resource *res;
+	struct device *dev = &pdev->dev;
 	void __iomem *addr = NULL;
 	struct stmmac_priv *priv = NULL;
 	struct plat_stmmacenet_data *plat_dat = NULL;
@@ -87,18 +88,10 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 	if (!res)
 		return -ENODEV;
 
-	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-		pr_err("%s: ERROR: memory allocation failed"
-		       "cannot get the I/O addr 0x%x\n",
-		       __func__, (unsigned int)res->start);
-		return -EBUSY;
-	}
-
-	addr = ioremap(res->start, resource_size(res));
+	addr = devm_request_and_ioremap(dev, res);
 	if (!addr) {
 		pr_err("%s: ERROR: memory mapping failed", __func__);
-		ret = -ENOMEM;
-		goto out_release_region;
+		return -ENOMEM;
 	}
 
 	if (pdev->dev.of_node) {
@@ -107,14 +100,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 					GFP_KERNEL);
 		if (!plat_dat) {
 			pr_err("%s: ERROR: no memory", __func__);
-			ret = -ENOMEM;
-			goto out_unmap;
+			return  -ENOMEM;
 		}
 
 		ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
 		if (ret) {
 			pr_err("%s: main dt probe failed", __func__);
-			goto out_unmap;
+			return ret;
 		}
 	} else {
 		plat_dat = pdev->dev.platform_data;
@@ -124,13 +116,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 	if (plat_dat->init) {
 		ret = plat_dat->init(pdev);
 		if (unlikely(ret))
-			goto out_unmap;
+			return ret;
 	}
 
 	priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
 	if (!priv) {
 		pr_err("%s: main driver probe failed", __func__);
-		goto out_unmap;
+		return -ENODEV;
 	}
 
 	/* Get MAC address if available (DT) */
@@ -142,8 +134,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 	if (priv->dev->irq == -ENXIO) {
 		pr_err("%s: ERROR: MAC IRQ configuration "
 		       "information not found\n", __func__);
-		ret = -ENXIO;
-		goto out_unmap;
+		return -ENXIO;
 	}
 
 	/*
@@ -165,15 +156,6 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 	pr_debug("STMMAC platform driver registration completed");
 
 	return 0;
-
-out_unmap:
-	iounmap(addr);
-	platform_set_drvdata(pdev, NULL);
-
-out_release_region:
-	release_mem_region(res->start, resource_size(res));
-
-	return ret;
 }
 
 /**
@@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
-	struct resource *res;
 	int ret = stmmac_dvr_remove(ndev);
 
 	if (priv->plat->exit)
@@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, NULL);
 
-	iounmap((void __force __iomem *)priv->ioaddr);
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(res->start, resource_size(res));
-
 	return ret;
 }
 

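The probe conversion above replaces the request_mem_region()/ioremap() pair and its goto-based unwind with a single device-managed call, so every early return releases the region and mapping automatically. A hedged sketch of the same pattern in a generic platform driver (the driver name is illustrative; devm_request_and_ioremap() was the managed helper of this kernel generation):

#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Managed request + map: released on probe failure or unbind,
	 * so no iounmap()/release_mem_region() error labels are needed. */
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -ENOMEM;

	/* ... program the device through 'base' ... */
	return 0;
}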
+ 4 - 0
drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h

@@ -21,6 +21,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __STMMAC_TIMER_H__
+#define __STMMAC_TIMER_H__
 
 struct stmmac_timer {
 	void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
 extern int tmu2_register_user(void *fnt, void *data);
 extern void tmu2_unregister_user(void);
 #endif
+
+#endif /* __STMMAC_TIMER_H__ */

+ 0 - 1
drivers/net/ethernet/tundra/tsi108_eth.c

@@ -1358,7 +1358,6 @@ static int tsi108_open(struct net_device *dev)
 			break;
 		}
 
-		data->rxskbs[i] = skb;
 		data->rxskbs[i] = skb;
 		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
 		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;

+ 1 - 2
drivers/net/ethernet/wiznet/w5100.c

@@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
 	if (data && is_valid_ether_addr(data->mac_addr)) {
 		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
 	} else {
-		eth_random_addr(ndev->dev_addr);
-		ndev->addr_assign_type |= NET_ADDR_RANDOM;
+		eth_hw_addr_random(ndev);
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

+ 1 - 2
drivers/net/ethernet/wiznet/w5300.c

@@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
 	if (data && is_valid_ether_addr(data->mac_addr)) {
 		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
 	} else {
-		eth_random_addr(ndev->dev_addr);
-		ndev->addr_assign_type |= NET_ADDR_RANDOM;
+		eth_hw_addr_random(ndev);
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

+ 0 - 0
drivers/ieee802154/Kconfig → drivers/net/ieee802154/Kconfig


+ 0 - 0
drivers/ieee802154/Makefile → drivers/net/ieee802154/Makefile


+ 1 - 11
drivers/ieee802154/at86rf230.c → drivers/net/ieee802154/at86rf230.c

@@ -952,17 +952,7 @@ static struct spi_driver at86rf230_driver = {
 	.resume     = at86rf230_resume,
 };
 
-static int __init at86rf230_init(void)
-{
-	return spi_register_driver(&at86rf230_driver);
-}
-module_init(at86rf230_init);
-
-static void __exit at86rf230_exit(void)
-{
-	spi_unregister_driver(&at86rf230_driver);
-}
-module_exit(at86rf230_exit);
+module_spi_driver(at86rf230_driver);
 
 MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
 MODULE_LICENSE("GPL v2");
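
module_spi_driver() removes that registration boilerplate by generating it: for a given struct spi_driver it expands to roughly the init/exit pair deleted above, just with macro-derived names:

/* Rough expansion of module_spi_driver(at86rf230_driver): */
static int __init at86rf230_driver_init(void)
{
	return spi_register_driver(&at86rf230_driver);
}
module_init(at86rf230_driver_init);

static void __exit at86rf230_driver_exit(void)
{
	spi_unregister_driver(&at86rf230_driver);
}
module_exit(at86rf230_driver_exit);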

+ 0 - 1
drivers/ieee802154/fakehard.c → drivers/net/ieee802154/fakehard.c

@@ -446,4 +446,3 @@ static __exit void fake_exit(void)
 module_init(fake_init);
 module_exit(fake_exit);
 MODULE_LICENSE("GPL");
-

+ 0 - 0
drivers/ieee802154/fakelb.c → drivers/net/ieee802154/fakelb.c


+ 13 - 0
drivers/net/phy/Kconfig

@@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO
 	  several child MDIO busses to a parent bus.  Child bus
 	  selection is under the control of GPIO lines.
 
+config MDIO_BUS_MUX_MMIOREG
+	tristate "Support for MMIO device-controlled MDIO bus multiplexers"
+	depends on OF_MDIO
+	select MDIO_BUS_MUX
+	help
+	  This module provides a driver for MDIO bus multiplexers that
+	  are controlled via a simple memory-mapped device, like an FPGA.
+	  The multiplexer connects one of several child MDIO busses to a
+	  parent bus.  Child bus selection is under the control of one of
+	  the FPGA's registers.
+
+	  Currently, only 8-bit registers are supported.
+
 endif # PHYLIB
 
 config MICREL_KS8995MA
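
The help text sums up the mechanism: the driver routes the shared MDIO lines by writing a child-bus selector into a single 8-bit memory-mapped register. A sketch of the switch callback such a mux driver registers with the mdio-mux core via mdio_mux_init() (the struct layout and names below are assumptions for illustration, not the new driver's actual code):

#include <linux/io.h>

/* Illustrative private state: one 8-bit register selects the child bus. */
struct mmioreg_mux_state {
	void __iomem *reg;
};

static int mmioreg_switch_fn(int current_child, int desired_child, void *data)
{
	struct mmioreg_mux_state *s = data;

	if (current_child != desired_child)
		writeb(desired_child, s->reg);	/* route MDIO to the child */
	return 0;
}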

+ 1 - 0
drivers/net/phy/Makefile

@@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA)	+= spi_ks8995.o
 obj-$(CONFIG_AMD_PHY)		+= amd.o
 obj-$(CONFIG_MDIO_BUS_MUX)	+= mdio-mux.o
 obj-$(CONFIG_MDIO_BUS_MUX_GPIO)	+= mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o

Too many files changed in this diff, so some files are not shown.