Merge branch 'upstream-fixes'

Jeff Garzik, 19 years ago
commit b4ea75b649
82 changed files with 1408 additions and 1128 deletions
  1. arch/arm/configs/ep80219_defconfig (+1 -0)
  2. arch/arm/configs/iq31244_defconfig (+1 -0)
  3. arch/arm/configs/iq80321_defconfig (+1 -0)
  4. arch/arm/configs/iq80331_defconfig (+1 -0)
  5. arch/arm/configs/iq80332_defconfig (+1 -0)
  6. arch/i386/defconfig (+2 -0)
  7. arch/ia64/configs/gensparse_defconfig (+1 -0)
  8. arch/ia64/configs/tiger_defconfig (+1 -0)
  9. arch/ia64/configs/zx1_defconfig (+1 -0)
  10. arch/ia64/defconfig (+1 -0)
  11. arch/parisc/configs/a500_defconfig (+1 -0)
  12. arch/parisc/configs/c3000_defconfig (+1 -0)
  13. arch/powerpc/configs/cell_defconfig (+1 -0)
  14. arch/powerpc/configs/g5_defconfig (+1 -0)
  15. arch/powerpc/configs/iseries_defconfig (+1 -0)
  16. arch/powerpc/configs/maple_defconfig (+1 -0)
  17. arch/powerpc/configs/ppc64_defconfig (+1 -0)
  18. arch/powerpc/configs/pseries_defconfig (+1 -0)
  19. arch/ppc/configs/bamboo_defconfig (+1 -0)
  20. arch/ppc/configs/katana_defconfig (+1 -0)
  21. arch/ppc/configs/mpc834x_sys_defconfig (+1 -0)
  22. arch/ppc/configs/power3_defconfig (+1 -0)
  23. arch/sparc/mm/iommu.c (+6 -7)
  24. arch/sparc64/defconfig (+1 -0)
  25. arch/sparc64/kernel/time.c (+11 -11)
  26. arch/x86_64/defconfig (+1 -0)
  27. drivers/net/Kconfig (+9 -0)
  28. drivers/net/cassini.c (+2 -2)
  29. drivers/net/e1000/e1000_ethtool.c (+153 -154)
  30. drivers/net/e1000/e1000_hw.c (+10 -8)
  31. drivers/net/e1000/e1000_hw.h (+11 -9)
  32. drivers/net/e1000/e1000_main.c (+478 -324)
  33. drivers/net/e1000/e1000_osdep.h (+1 -1)
  34. drivers/net/e1000/e1000_param.c (+22 -22)
  35. drivers/net/tg3.c (+54 -28)
  36. drivers/net/tg3.h (+1 -0)
  37. drivers/pci/quirks.c (+4 -1)
  38. drivers/scsi/ahci.c (+10 -0)
  39. drivers/scsi/ata_piix.c (+3 -0)
  40. drivers/scsi/libata-core.c (+63 -10)
  41. drivers/scsi/sata_promise.c (+16 -0)
  42. drivers/scsi/sata_svw.c (+1 -0)
  43. drivers/video/sbuslib.c (+3 -6)
  44. drivers/video/sbuslib.h (+1 -1)
  45. fs/xfs/linux-2.6/xfs_aops.c (+26 -3)
  46. include/asm-powerpc/lppaca.h (+2 -2)
  47. include/linux/kernel.h (+1 -0)
  48. include/linux/libata.h (+8 -3)
  49. include/linux/netfilter_ipv6/ip6t_ah.h (+0 -9)
  50. include/linux/netfilter_ipv6/ip6t_esp.h (+0 -9)
  51. include/linux/netfilter_ipv6/ip6t_frag.h (+0 -9)
  52. include/linux/netfilter_ipv6/ip6t_opts.h (+0 -9)
  53. include/linux/netfilter_ipv6/ip6t_rt.h (+0 -9)
  54. include/linux/skbuff.h (+1 -1)
  55. net/bridge/netfilter/ebt_ip.c (+3 -1)
  56. net/bridge/netfilter/ebt_log.c (+3 -1)
  57. net/core/filter.c (+6 -7)
  58. net/core/netpoll.c (+1 -1)
  59. net/core/pktgen.c (+11 -23)
  60. net/dccp/ackvec.c (+1 -1)
  61. net/ipv4/netfilter/Makefile (+0 -1)
  62. net/ipv4/netfilter/ip_conntrack_proto_gre.c (+1 -0)
  63. net/ipv4/netfilter/ipt_policy.c (+5 -2)
  64. net/ipv4/route.c (+4 -10)
  65. net/ipv6/addrconf.c (+1 -1)
  66. net/ipv6/anycast.c (+1 -1)
  67. net/ipv6/ip6_flowlabel.c (+2 -2)
  68. net/ipv6/mcast.c (+3 -3)
  69. net/ipv6/netfilter/Makefile (+0 -1)
  70. net/ipv6/netfilter/ip6t_dst.c (+75 -76)
  71. net/ipv6/netfilter/ip6t_eui64.c (+34 -34)
  72. net/ipv6/netfilter/ip6t_frag.c (+78 -79)
  73. net/ipv6/netfilter/ip6t_hbh.c (+75 -76)
  74. net/ipv6/netfilter/ip6t_ipv6header.c (+39 -40)
  75. net/ipv6/netfilter/ip6t_owner.c (+14 -14)
  76. net/ipv6/netfilter/ip6t_policy.c (+1 -1)
  77. net/ipv6/netfilter/ip6t_rt.c (+112 -103)
  78. net/rxrpc/krxtimod.c (+1 -1)
  79. net/rxrpc/proc.c (+6 -6)
  80. net/sched/sch_prio.c (+3 -4)
  81. net/sched/sch_sfq.c (+4 -0)
  82. sound/sparc/cs4231.c (+2 -1)

+ 1 - 0
arch/arm/configs/ep80219_defconfig

@@ -522,6 +522,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/arm/configs/iq31244_defconfig

@@ -493,6 +493,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/arm/configs/iq80321_defconfig

@@ -415,6 +415,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/arm/configs/iq80331_defconfig

@@ -496,6 +496,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/arm/configs/iq80332_defconfig

@@ -496,6 +496,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 2 - 0
arch/i386/defconfig

@@ -644,6 +644,8 @@ CONFIG_8139TOO_PIO=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 # CONFIG_E1000 is not set
+# CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/ia64/configs/gensparse_defconfig

@@ -557,6 +557,7 @@ CONFIG_E100=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/ia64/configs/tiger_defconfig

@@ -565,6 +565,7 @@ CONFIG_E100=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/ia64/configs/zx1_defconfig

@@ -548,6 +548,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/ia64/defconfig

@@ -565,6 +565,7 @@ CONFIG_E100=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/parisc/configs/a500_defconfig

@@ -602,6 +602,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/parisc/configs/c3000_defconfig

@@ -626,6 +626,7 @@ CONFIG_ACENIC=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/powerpc/configs/cell_defconfig

@@ -533,6 +533,7 @@ CONFIG_MII=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/powerpc/configs/g5_defconfig

@@ -675,6 +675,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/powerpc/configs/iseries_defconfig

@@ -567,6 +567,7 @@ CONFIG_ACENIC=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/powerpc/configs/maple_defconfig

@@ -454,6 +454,7 @@ CONFIG_AMD8111_ETH=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/powerpc/configs/ppc64_defconfig

@@ -724,6 +724,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/powerpc/configs/pseries_defconfig

@@ -671,6 +671,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/ppc/configs/bamboo_defconfig

@@ -499,6 +499,7 @@ CONFIG_NATSEMI=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/ppc/configs/katana_defconfig

@@ -488,6 +488,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/ppc/configs/mpc834x_sys_defconfig

@@ -402,6 +402,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 1 - 0
arch/ppc/configs/power3_defconfig

@@ -442,6 +442,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 6 - 7
arch/sparc/mm/iommu.c

@@ -295,8 +295,7 @@ static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
 	int ioptex;
 	int i;
 
-	if (busa < iommu->start)
-		BUG();
+	BUG_ON(busa < iommu->start);
 	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		iopte_val(iommu->page_table[ioptex + i]) = 0;
@@ -340,9 +339,9 @@ static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
 	iopte_t *first;
 	int ioptex;
 
-	if ((va & ~PAGE_MASK) != 0) BUG();
-	if ((addr & ~PAGE_MASK) != 0) BUG();
-	if ((len & ~PAGE_MASK) != 0) BUG();
+	BUG_ON((va & ~PAGE_MASK) != 0);
+	BUG_ON((addr & ~PAGE_MASK) != 0);
+	BUG_ON((len & ~PAGE_MASK) != 0);
 
 	/* page color = physical address */
 	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
@@ -405,8 +404,8 @@ static void iommu_unmap_dma_area(unsigned long busa, int len)
 	unsigned long end;
 	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 
-	if ((busa & ~PAGE_MASK) != 0) BUG();
-	if ((len & ~PAGE_MASK) != 0) BUG();
+	BUG_ON((busa & ~PAGE_MASK) != 0);
+	BUG_ON((len & ~PAGE_MASK) != 0);
 
 	iopte += ioptex;
 	end = busa + len;

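Note on the iommu.c hunk above: it converts open-coded "if (cond) BUG();" checks into the kernel's BUG_ON() macro, which reads as a single assertion. As a rough sketch only (the real macro lives in include/asm-generic/bug.h and can differ per architecture), the idiom behaves like this:

	/* Hedged sketch of the BUG_ON() idiom, not the exact kernel definition. */
	#define BUG_ON(condition)				\
		do {						\
			if (unlikely(condition))		\
				BUG();	/* oops and stop */	\
		} while (0)

	/* Usage matching the converted checks: */
	BUG_ON(busa < iommu->start);
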
+ 1 - 0
arch/sparc64/defconfig

@@ -529,6 +529,7 @@ CONFIG_NET_PCI=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_MYRI_SBUS is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set

+ 11 - 11
arch/sparc64/kernel/time.c

@@ -280,9 +280,9 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
  * Since STICK is constantly updating, we have to access it carefully.
  *
  * The sequence we use to read is:
- * 1) read low
- * 2) read high
- * 3) read low again, if it rolled over increment high by 1
+ * 1) read high
+ * 2) read low
+ * 3) read high again, if it rolled re-read both low and high again.
  *
  * Writing STICK safely is also tricky:
  * 1) write low to zero
@@ -295,18 +295,18 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
 static unsigned long __hbird_read_stick(void)
 {
 	unsigned long ret, tmp1, tmp2, tmp3;
-	unsigned long addr = HBIRD_STICK_ADDR;
+	unsigned long addr = HBIRD_STICK_ADDR+8;
 
-	__asm__ __volatile__("ldxa	[%1] %5, %2\n\t"
-			     "add	%1, 0x8, %1\n\t"
-			     "ldxa	[%1] %5, %3\n\t"
+	__asm__ __volatile__("ldxa	[%1] %5, %2\n"
+			     "1:\n\t"
 			     "sub	%1, 0x8, %1\n\t"
+			     "ldxa	[%1] %5, %3\n\t"
+			     "add	%1, 0x8, %1\n\t"
 			     "ldxa	[%1] %5, %4\n\t"
 			     "cmp	%4, %2\n\t"
-			     "blu,a,pn	%%xcc, 1f\n\t"
-			     " add	%3, 1, %3\n"
-			     "1:\n\t"
-			     "sllx	%3, 32, %3\n\t"
+			     "bne,a,pn	%%xcc, 1b\n\t"
+			     " mov	%4, %2\n\t"
+			     "sllx	%4, 32, %4\n\t"
 			     "or	%3, %4, %0\n\t"
 			     : "=&r" (ret), "=&r" (addr),
 			       "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)

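The updated comment in time.c describes the usual way to read a constantly-ticking 64-bit counter exposed as two 32-bit halves: read high, read low, read high again, and retry if the high word changed in between (otherwise the low word could belong to either epoch). A minimal C sketch of that sequence follows; read_high() and read_low() are hypothetical accessors standing in for the hummingbird asm above, not real kernel functions.

	static unsigned long long read_split_counter(void)
	{
		unsigned int hi, lo, hi2;

		do {
			hi  = read_high();	/* 1) read high */
			lo  = read_low();	/* 2) read low */
			hi2 = read_high();	/* 3) read high again */
		} while (hi != hi2);		/* rolled over: re-read both */

		return ((unsigned long long)hi << 32) | lo;
	}
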
+ 1 - 0
arch/x86_64/defconfig

@@ -646,6 +646,7 @@ CONFIG_8139TOO=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set

+ 9 - 0
drivers/net/Kconfig

@@ -1914,6 +1914,15 @@ config E1000_NAPI
 
 	  If in doubt, say N.
 
+config E1000_DISABLE_PACKET_SPLIT
+	bool "Disable Packet Split for PCI express adapters"
+	depends on E1000
+	help
+	  Say Y here if you want to use the legacy receive path for PCI express
+	  hardware.
+
+	  If in doubt, say N.
+
 source "drivers/net/ixp2000/Kconfig"
 
 config MYRI_SBUS

+ 2 - 2
drivers/net/cassini.c

@@ -1925,8 +1925,8 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
 	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
 #endif
 	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n",
-			cp->dev->name, status, compwb);
+		printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
+			cp->dev->name, status, (unsigned long long)compwb);
 	/* process all the rings */
 	for (ring = 0; ring < N_TX_RINGS; ring++) {
 #ifdef USE_TX_COMPWB

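The cassini.c change fixes a printk format mismatch: compwb is 64 bits wide, and a 64-bit type may be unsigned long on some 64-bit architectures but unsigned long long elsewhere, so "%lx" is wrong on at least one of them. The portable pattern is to cast to unsigned long long and print with "%llx". A self-contained userspace sketch of the same pattern, with a made-up stand-in value, looks like this:

	/* Hedged illustration of the cast-to-unsigned-long-long pattern; not driver code. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t compwb = 0x1234deadbeefULL;	/* stand-in for the completion write-back */

		printf("tx interrupt, status: 0x%x, %llx\n",
		       0x1u, (unsigned long long)compwb);
		return 0;
	}
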
+ 153 - 154
drivers/net/e1000/e1000_ethtool.c

@@ -121,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	if(hw->media_type == e1000_media_type_copper) {
+	if (hw->media_type == e1000_media_type_copper) {
 
 		ecmd->supported = (SUPPORTED_10baseT_Half |
 		                   SUPPORTED_10baseT_Full |
@@ -133,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 		ecmd->advertising = ADVERTISED_TP;
 
-		if(hw->autoneg == 1) {
+		if (hw->autoneg == 1) {
 			ecmd->advertising |= ADVERTISED_Autoneg;
 
 			/* the e1000 autoneg seems to match ethtool nicely */
@@ -144,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		ecmd->port = PORT_TP;
 		ecmd->phy_address = hw->phy_addr;
 
-		if(hw->mac_type == e1000_82543)
+		if (hw->mac_type == e1000_82543)
 			ecmd->transceiver = XCVR_EXTERNAL;
 		else
 			ecmd->transceiver = XCVR_INTERNAL;
@@ -160,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 		ecmd->port = PORT_FIBRE;
 
-		if(hw->mac_type >= e1000_82545)
+		if (hw->mac_type >= e1000_82545)
 			ecmd->transceiver = XCVR_INTERNAL;
 		else
 			ecmd->transceiver = XCVR_EXTERNAL;
 	}
 
-	if(netif_carrier_ok(adapter->netdev)) {
+	if (netif_carrier_ok(adapter->netdev)) {
 
 		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
 		                                   &adapter->link_duplex);
@@ -175,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		/* unfortunatly FULL_DUPLEX != DUPLEX_FULL
 		 *          and HALF_DUPLEX != DUPLEX_HALF */
 
-		if(adapter->link_duplex == FULL_DUPLEX)
+		if (adapter->link_duplex == FULL_DUPLEX)
 			ecmd->duplex = DUPLEX_FULL;
 		else
 			ecmd->duplex = DUPLEX_HALF;
@@ -205,11 +205,11 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->autoneg = 1;
-		if(hw->media_type == e1000_media_type_fiber)
+		if (hw->media_type == e1000_media_type_fiber)
 			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
 				     ADVERTISED_FIBRE |
 				     ADVERTISED_Autoneg;
-		else 
+		else
 			hw->autoneg_advertised = ADVERTISED_10baseT_Half |
 						  ADVERTISED_10baseT_Full |
 						  ADVERTISED_100baseT_Half |
@@ -219,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 						  ADVERTISED_TP;
 		ecmd->advertising = hw->autoneg_advertised;
 	} else
-		if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
+		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
 			return -EINVAL;
 
 	/* reset the link */
 
-	if(netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev)) {
 		e1000_down(adapter);
 		e1000_reset(adapter);
 		e1000_up(adapter);
@@ -241,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	pause->autoneg = 
+	pause->autoneg =
 		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
-	
-	if(hw->fc == e1000_fc_rx_pause)
+
+	if (hw->fc == e1000_fc_rx_pause)
 		pause->rx_pause = 1;
-	else if(hw->fc == e1000_fc_tx_pause)
+	else if (hw->fc == e1000_fc_tx_pause)
 		pause->tx_pause = 1;
-	else if(hw->fc == e1000_fc_full) {
+	else if (hw->fc == e1000_fc_full) {
 		pause->rx_pause = 1;
 		pause->tx_pause = 1;
 	}
@@ -260,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	
+
 	adapter->fc_autoneg = pause->autoneg;
 
-	if(pause->rx_pause && pause->tx_pause)
+	if (pause->rx_pause && pause->tx_pause)
 		hw->fc = e1000_fc_full;
-	else if(pause->rx_pause && !pause->tx_pause)
+	else if (pause->rx_pause && !pause->tx_pause)
 		hw->fc = e1000_fc_rx_pause;
-	else if(!pause->rx_pause && pause->tx_pause)
+	else if (!pause->rx_pause && pause->tx_pause)
 		hw->fc = e1000_fc_tx_pause;
-	else if(!pause->rx_pause && !pause->tx_pause)
+	else if (!pause->rx_pause && !pause->tx_pause)
 		hw->fc = e1000_fc_none;
 
 	hw->original_fc = hw->fc;
 
-	if(adapter->fc_autoneg == AUTONEG_ENABLE) {
-		if(netif_running(adapter->netdev)) {
+	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+		if (netif_running(adapter->netdev)) {
 			e1000_down(adapter);
 			e1000_up(adapter);
 		} else
 			e1000_reset(adapter);
-	}
-	else
+	} else
 		return ((hw->media_type == e1000_media_type_fiber) ?
 			e1000_setup_link(hw) : e1000_force_mac_fc(hw));
-	
+
 	return 0;
 }
 
@@ -301,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	adapter->rx_csum = data;
 
-	if(netif_running(netdev)) {
+	if (netif_running(netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
 	} else
 		e1000_reset(adapter);
 	return 0;
 }
-	
+
 static uint32_t
 e1000_get_tx_csum(struct net_device *netdev)
 {
@@ -320,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if(adapter->hw.mac_type < e1000_82543) {
+	if (adapter->hw.mac_type < e1000_82543) {
 		if (!data)
 			return -EINVAL;
 		return 0;
@@ -339,8 +338,8 @@ static int
 e1000_set_tso(struct net_device *netdev, uint32_t data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	if((adapter->hw.mac_type < e1000_82544) ||
-	    (adapter->hw.mac_type == e1000_82547)) 
+	if ((adapter->hw.mac_type < e1000_82544) ||
+	    (adapter->hw.mac_type == e1000_82547))
 		return data ? -EINVAL : 0;
 
 	if (data)
@@ -348,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
 	else
 		netdev->features &= ~NETIF_F_TSO;
 	return 0;
-} 
+}
 #endif /* NETIF_F_TSO */
 
 static uint32_t
@@ -365,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data)
 	adapter->msg_enable = data;
 }
 
-static int 
+static int
 e1000_get_regs_len(struct net_device *netdev)
 {
 #define E1000_REGS_LEN 32
@@ -401,7 +400,7 @@ e1000_get_regs(struct net_device *netdev,
 	regs_buff[11] = E1000_READ_REG(hw, TIDV);
 
 	regs_buff[12] = adapter->hw.phy_type;  /* PHY type (IGP=1, M88=0) */
-	if(hw->phy_type == e1000_phy_igp) {
+	if (hw->phy_type == e1000_phy_igp) {
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 				    IGP01E1000_PHY_AGC_A);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
@@ -455,7 +454,7 @@ e1000_get_regs(struct net_device *netdev,
 	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
 	regs_buff[24] = (uint32_t)phy_data;  /* phy local receiver status */
 	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
-	if(hw->mac_type >= e1000_82540 &&
+	if (hw->mac_type >= e1000_82540 &&
 	   hw->media_type == e1000_media_type_copper) {
 		regs_buff[26] = E1000_READ_REG(hw, MANC);
 	}
@@ -479,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev,
 	int ret_val = 0;
 	uint16_t i;
 
-	if(eeprom->len == 0)
+	if (eeprom->len == 0)
 		return -EINVAL;
 
 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -489,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev,
 
 	eeprom_buff = kmalloc(sizeof(uint16_t) *
 			(last_word - first_word + 1), GFP_KERNEL);
-	if(!eeprom_buff)
+	if (!eeprom_buff)
 		return -ENOMEM;
 
-	if(hw->eeprom.type == e1000_eeprom_spi)
+	if (hw->eeprom.type == e1000_eeprom_spi)
 		ret_val = e1000_read_eeprom(hw, first_word,
 					    last_word - first_word + 1,
 					    eeprom_buff);
 	else {
 		for (i = 0; i < last_word - first_word + 1; i++)
-			if((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+			if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
 							&eeprom_buff[i])))
 				break;
 	}
@@ -525,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev,
 	int max_len, first_word, last_word, ret_val = 0;
 	uint16_t i;
 
-	if(eeprom->len == 0)
+	if (eeprom->len == 0)
 		return -EOPNOTSUPP;
 
-	if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
 		return -EFAULT;
 
 	max_len = hw->eeprom.word_size * 2;
@@ -536,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev,
 	first_word = eeprom->offset >> 1;
 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
-	if(!eeprom_buff)
+	if (!eeprom_buff)
 		return -ENOMEM;
 
 	ptr = (void *)eeprom_buff;
 
-	if(eeprom->offset & 1) {
+	if (eeprom->offset & 1) {
 		/* need read/modify/write of first changed EEPROM word */
 		/* only the second byte of the word is being modified */
 		ret_val = e1000_read_eeprom(hw, first_word, 1,
 					    &eeprom_buff[0]);
 		ptr++;
 	}
-	if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
 		/* need read/modify/write of last changed EEPROM word */
 		/* only the first byte of the word is being modified */
 		ret_val = e1000_read_eeprom(hw, last_word, 1,
@@ -567,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev,
 	ret_val = e1000_write_eeprom(hw, first_word,
 				     last_word - first_word + 1, eeprom_buff);
 
-	/* Update the checksum over the first part of the EEPROM if needed 
+	/* Update the checksum over the first part of the EEPROM if needed
 	 * and flush shadow RAM for 82573 conrollers */
-	if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || 
+	if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
 				(hw->mac_type == e1000_82573)))
 		e1000_update_eeprom_checksum(hw);
 
@@ -633,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev,
 	ring->rx_jumbo_pending = 0;
 }
 
-static int 
+static int
 e1000_set_ringparam(struct net_device *netdev,
                     struct ethtool_ringparam *ring)
 {
@@ -670,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev,
 	txdr = adapter->tx_ring;
 	rxdr = adapter->rx_ring;
 
-	if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
 	rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
 	rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
 		E1000_MAX_RXD : E1000_MAX_82544_RXD));
-	E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); 
+	E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
 	txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
 	txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
 		E1000_MAX_TXD : E1000_MAX_82544_TXD));
-	E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 
+	E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		txdr[i].count = txdr->count;
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		rxdr[i].count = rxdr->count;
 
-	if(netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev)) {
 		/* Try to get new resources before deleting old */
 		if ((err = e1000_setup_all_rx_resources(adapter)))
 			goto err_setup_rx;
@@ -708,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev,
 		kfree(rx_old);
 		adapter->rx_ring = rx_new;
 		adapter->tx_ring = tx_new;
-		if((err = e1000_up(adapter)))
+		if ((err = e1000_up(adapter)))
 			return err;
 	}
 
@@ -727,10 +726,10 @@ err_setup_rx:
 	uint32_t pat, value;                                                   \
 	uint32_t test[] =                                                      \
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};              \
-	for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) {              \
+	for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) {              \
 		E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W));             \
 		value = E1000_READ_REG(&adapter->hw, R);                       \
-		if(value != (test[pat] & W & M)) {                             \
+		if (value != (test[pat] & W & M)) {                             \
 			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
 			        "0x%08X expected 0x%08X\n",                    \
 			        E1000_##R, value, (test[pat] & W & M));        \
@@ -746,7 +745,7 @@ err_setup_rx:
 	uint32_t value;                                                        \
 	E1000_WRITE_REG(&adapter->hw, R, W & M);                               \
 	value = E1000_READ_REG(&adapter->hw, R);                               \
-	if((W & M) != (value & M)) {                                          \
+	if ((W & M) != (value & M)) {                                          \
 		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
 		        "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
 		*data = (adapter->hw.mac_type < e1000_82543) ?                 \
@@ -782,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 	value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
 	E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
 	after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
-	if(value != after) {
+	if (value != after) {
 		DPRINTK(DRV, ERR, "failed STATUS register test got: "
 		        "0x%08X expected: 0x%08X\n", after, value);
 		*data = 1;
@@ -810,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 	REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
 	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
 
-	if(adapter->hw.mac_type >= e1000_82543) {
+	if (adapter->hw.mac_type >= e1000_82543) {
 
 		REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
 		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
@@ -818,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
 		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
 
-		for(i = 0; i < E1000_RAR_ENTRIES; i++) {
+		for (i = 0; i < E1000_RAR_ENTRIES; i++) {
 			REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
 					 0xFFFFFFFF);
 			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -834,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 
 	}
 
-	for(i = 0; i < E1000_MC_TBL_SIZE; i++)
+	for (i = 0; i < E1000_MC_TBL_SIZE; i++)
 		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
 
 	*data = 0;
@@ -850,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
-	for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
-		if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
+	for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+		if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
 			*data = 1;
 			break;
 		}
@@ -859,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 	}
 
 	/* If Checksum is not Correct return error else test passed */
-	if((checksum != (uint16_t) EEPROM_SUM) && !(*data))
+	if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
 		*data = 2;
 
 	return *data;
@@ -888,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
- 	if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+ 	if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
  		shared_int = FALSE;
- 	} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
+ 	} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
 			      netdev->name, netdev)){
 		*data = 1;
 		return -1;
@@ -901,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	msec_delay(10);
 
 	/* Test each interrupt */
-	for(; i < 10; i++) {
+	for (; i < 10; i++) {
 
 		/* Interrupt to test */
 		mask = 1 << i;
 
- 		if(!shared_int) {
+ 		if (!shared_int) {
  			/* Disable the interrupt to be reported in
  			 * the cause register and then force the same
  			 * interrupt and see if one gets posted.  If
@@ -917,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
  			E1000_WRITE_REG(&adapter->hw, IMC, mask);
  			E1000_WRITE_REG(&adapter->hw, ICS, mask);
  			msec_delay(10);
- 
- 			if(adapter->test_icr & mask) {
+
+ 			if (adapter->test_icr & mask) {
  				*data = 3;
  				break;
  			}
@@ -935,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 		E1000_WRITE_REG(&adapter->hw, ICS, mask);
 		msec_delay(10);
 
-		if(!(adapter->test_icr & mask)) {
+		if (!(adapter->test_icr & mask)) {
 			*data = 4;
 			break;
 		}
 
- 		if(!shared_int) {
+ 		if (!shared_int) {
 			/* Disable the other interrupts to be reported in
 			 * the cause register and then force the other
 			 * interrupts and see if any get posted.  If
@@ -952,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 			E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
 			msec_delay(10);
 
-			if(adapter->test_icr) {
+			if (adapter->test_icr) {
 				*data = 5;
 				break;
 			}
@@ -977,24 +976,24 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int i;
 
-	if(txdr->desc && txdr->buffer_info) {
-		for(i = 0; i < txdr->count; i++) {
-			if(txdr->buffer_info[i].dma)
+	if (txdr->desc && txdr->buffer_info) {
+		for (i = 0; i < txdr->count; i++) {
+			if (txdr->buffer_info[i].dma)
 				pci_unmap_single(pdev, txdr->buffer_info[i].dma,
 						 txdr->buffer_info[i].length,
 						 PCI_DMA_TODEVICE);
-			if(txdr->buffer_info[i].skb)
+			if (txdr->buffer_info[i].skb)
 				dev_kfree_skb(txdr->buffer_info[i].skb);
 		}
 	}
 
-	if(rxdr->desc && rxdr->buffer_info) {
-		for(i = 0; i < rxdr->count; i++) {
-			if(rxdr->buffer_info[i].dma)
+	if (rxdr->desc && rxdr->buffer_info) {
+		for (i = 0; i < rxdr->count; i++) {
+			if (rxdr->buffer_info[i].dma)
 				pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
 						 rxdr->buffer_info[i].length,
 						 PCI_DMA_FROMDEVICE);
-			if(rxdr->buffer_info[i].skb)
+			if (rxdr->buffer_info[i].skb)
 				dev_kfree_skb(rxdr->buffer_info[i].skb);
 		}
 	}
@@ -1027,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	/* Setup Tx descriptor ring and Tx buffers */
 
-	if(!txdr->count)
-		txdr->count = E1000_DEFAULT_TXD;   
+	if (!txdr->count)
+		txdr->count = E1000_DEFAULT_TXD;
 
 	size = txdr->count * sizeof(struct e1000_buffer);
-	if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+	if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
 		ret_val = 1;
 		goto err_nomem;
 	}
@@ -1039,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 	E1000_ROUNDUP(txdr->size, 4096);
-	if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
+	if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
 		ret_val = 2;
 		goto err_nomem;
 	}
@@ -1058,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
 			E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
 
-	for(i = 0; i < txdr->count; i++) {
+	for (i = 0; i < txdr->count; i++) {
 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
 		struct sk_buff *skb;
 		unsigned int size = 1024;
 
-		if(!(skb = alloc_skb(size, GFP_KERNEL))) {
+		if (!(skb = alloc_skb(size, GFP_KERNEL))) {
 			ret_val = 3;
 			goto err_nomem;
 		}
@@ -1083,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	/* Setup Rx descriptor ring and Rx buffers */
 
-	if(!rxdr->count)
-		rxdr->count = E1000_DEFAULT_RXD;   
+	if (!rxdr->count)
+		rxdr->count = E1000_DEFAULT_RXD;
 
 	size = rxdr->count * sizeof(struct e1000_buffer);
-	if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+	if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
 		ret_val = 4;
 		goto err_nomem;
 	}
 	memset(rxdr->buffer_info, 0, size);
 
 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
-	if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
+	if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
 		ret_val = 5;
 		goto err_nomem;
 	}
@@ -1114,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
 
-	for(i = 0; i < rxdr->count; i++) {
+	for (i = 0; i < rxdr->count; i++) {
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
 		struct sk_buff *skb;
 
-		if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+		if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
 				GFP_KERNEL))) {
 			ret_val = 6;
 			goto err_nomem;
@@ -1227,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 
 	/* Check Phy Configuration */
 	e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
-	if(phy_reg != 0x4100)
+	if (phy_reg != 0x4100)
 		 return 9;
 
 	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
-	if(phy_reg != 0x0070)
+	if (phy_reg != 0x0070)
 		return 10;
 
 	e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
-	if(phy_reg != 0x001A)
+	if (phy_reg != 0x001A)
 		return 11;
 
 	return 0;
@@ -1249,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 
 	adapter->hw.autoneg = FALSE;
 
-	if(adapter->hw.phy_type == e1000_phy_m88) {
+	if (adapter->hw.phy_type == e1000_phy_m88) {
 		/* Auto-MDI/MDIX Off */
 		e1000_write_phy_reg(&adapter->hw,
 				    M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1269,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
 		     E1000_CTRL_FD);	 /* Force Duplex to FULL */
 
-	if(adapter->hw.media_type == e1000_media_type_copper &&
+	if (adapter->hw.media_type == e1000_media_type_copper &&
 	   adapter->hw.phy_type == e1000_phy_m88) {
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
 	} else {
 		/* Set the ILOS bit on the fiber Nic is half
 		 * duplex link is detected. */
 		stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
-		if((stat_reg & E1000_STATUS_FD) == 0)
+		if ((stat_reg & E1000_STATUS_FD) == 0)
 			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
 	}
 
@@ -1285,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 	/* Disable the receiver on the PHY so when a cable is plugged in, the
 	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
 	 */
-	if(adapter->hw.phy_type == e1000_phy_m88)
+	if (adapter->hw.phy_type == e1000_phy_m88)
 		e1000_phy_disable_receiver(adapter);
 
 	udelay(500);
@@ -1301,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
 
 	switch (adapter->hw.mac_type) {
 	case e1000_82543:
-		if(adapter->hw.media_type == e1000_media_type_copper) {
+		if (adapter->hw.media_type == e1000_media_type_copper) {
 			/* Attempt to setup Loopback mode on Non-integrated PHY.
 			 * Some PHY registers get corrupted at random, so
 			 * attempt this 10 times.
 			 */
-			while(e1000_nonintegrated_phy_loopback(adapter) &&
+			while (e1000_nonintegrated_phy_loopback(adapter) &&
 			      count++ < 10);
-			if(count < 11)
+			if (count < 11)
 				return 0;
 		}
 		break;
@@ -1430,8 +1429,8 @@ static int
 e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
 	frame_size &= ~1;
-	if(*(skb->data + 3) == 0xFF) {
-		if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
 		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
 			return 0;
 		}
@@ -1450,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 
 	E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
 
-	/* Calculate the loop count based on the largest descriptor ring 
+	/* Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
 	 * send/receive pairs during each loop
 	 */
 
-	if(rxdr->count <= txdr->count)
+	if (rxdr->count <= txdr->count)
 		lc = ((txdr->count / 64) * 2) + 1;
 	else
 		lc = ((rxdr->count / 64) * 2) + 1;
 
 	k = l = 0;
-	for(j = 0; j <= lc; j++) { /* loop count loop */
-		for(i = 0; i < 64; i++) { /* send the packets */
-			e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 
+	for (j = 0; j <= lc; j++) { /* loop count loop */
+		for (i = 0; i < 64; i++) { /* send the packets */
+			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
 					1024);
-			pci_dma_sync_single_for_device(pdev, 
+			pci_dma_sync_single_for_device(pdev,
 					txdr->buffer_info[k].dma,
 				    	txdr->buffer_info[k].length,
 				    	PCI_DMA_TODEVICE);
-			if(unlikely(++k == txdr->count)) k = 0;
+			if (unlikely(++k == txdr->count)) k = 0;
 		}
 		E1000_WRITE_REG(&adapter->hw, TDT, k);
 		msec_delay(200);
 		time = jiffies; /* set the start time for the receive */
 		good_cnt = 0;
 		do { /* receive the sent packets */
-			pci_dma_sync_single_for_cpu(pdev, 
+			pci_dma_sync_single_for_cpu(pdev,
 					rxdr->buffer_info[l].dma,
 				    	rxdr->buffer_info[l].length,
 				    	PCI_DMA_FROMDEVICE);
-	
+
 			ret_val = e1000_check_lbtest_frame(
 					rxdr->buffer_info[l].skb,
 				   	1024);
-			if(!ret_val)
+			if (!ret_val)
 				good_cnt++;
-			if(unlikely(++l == rxdr->count)) l = 0;
-			/* time + 20 msecs (200 msecs on 2.4) is more than 
-			 * enough time to complete the receives, if it's 
+			if (unlikely(++l == rxdr->count)) l = 0;
+			/* time + 20 msecs (200 msecs on 2.4) is more than
+			 * enough time to complete the receives, if it's
 			 * exceeded, break and error off
 			 */
 		} while (good_cnt < 64 && jiffies < (time + 20));
-		if(good_cnt != 64) {
+		if (good_cnt != 64) {
 			ret_val = 13; /* ret_val is the same as mis-compare */
-			break; 
+			break;
 		}
-		if(jiffies >= (time + 2)) {
+		if (jiffies >= (time + 2)) {
 			ret_val = 14; /* error code for time out error */
 			break;
 		}
@@ -1549,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
 		*data = 1;
 	} else {
 		e1000_check_for_link(&adapter->hw);
-		if(adapter->hw.autoneg)  /* if auto_neg is set wait for it */
+		if (adapter->hw.autoneg)  /* if auto_neg is set wait for it */
 			msec_delay(4000);
 
-		if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
+		if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
 			*data = 1;
 		}
 	}
 	return *data;
 }
 
-static int 
+static int
 e1000_diag_test_count(struct net_device *netdev)
 {
 	return E1000_TEST_LEN;
@@ -1572,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	boolean_t if_running = netif_running(netdev);
 
-	if(eth_test->flags == ETH_TEST_FL_OFFLINE) {
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
 		/* save speed, duplex, autoneg settings */
@@ -1582,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev,
 
 		/* Link test performed before hardware reset so autoneg doesn't
 		 * interfere with test result */
-		if(e1000_link_test(adapter, &data[4]))
+		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		if(if_running)
+		if (if_running)
 			e1000_down(adapter);
 		else
 			e1000_reset(adapter);
 
-		if(e1000_reg_test(adapter, &data[0]))
+		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
-		if(e1000_eeprom_test(adapter, &data[1]))
+		if (e1000_eeprom_test(adapter, &data[1]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
-		if(e1000_intr_test(adapter, &data[2]))
+		if (e1000_intr_test(adapter, &data[2]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
-		if(e1000_loopback_test(adapter, &data[3]))
+		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		/* restore speed, duplex, autoneg settings */
@@ -1611,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev,
 		adapter->hw.autoneg = autoneg;
 
 		e1000_reset(adapter);
-		if(if_running)
+		if (if_running)
 			e1000_up(adapter);
 	} else {
 		/* Online tests */
-		if(e1000_link_test(adapter, &data[4]))
+		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		/* Offline tests aren't run; pass by default */
@@ -1633,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	switch(adapter->hw.device_id) {
+	switch (adapter->hw.device_id) {
 	case E1000_DEV_ID_82542:
 	case E1000_DEV_ID_82543GC_FIBER:
 	case E1000_DEV_ID_82543GC_COPPER:
@@ -1649,7 +1648,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
-		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
+		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
 			wol->supported = 0;
 			wol->wolopts   = 0;
 			return;
@@ -1661,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 				 WAKE_BCAST | WAKE_MAGIC;
 
 		wol->wolopts = 0;
-		if(adapter->wol & E1000_WUFC_EX)
+		if (adapter->wol & E1000_WUFC_EX)
 			wol->wolopts |= WAKE_UCAST;
-		if(adapter->wol & E1000_WUFC_MC)
+		if (adapter->wol & E1000_WUFC_MC)
 			wol->wolopts |= WAKE_MCAST;
-		if(adapter->wol & E1000_WUFC_BC)
+		if (adapter->wol & E1000_WUFC_BC)
 			wol->wolopts |= WAKE_BCAST;
-		if(adapter->wol & E1000_WUFC_MAG)
+		if (adapter->wol & E1000_WUFC_MAG)
 			wol->wolopts |= WAKE_MAGIC;
 		return;
 	}
@@ -1679,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	switch(adapter->hw.device_id) {
+	switch (adapter->hw.device_id) {
 	case E1000_DEV_ID_82542:
 	case E1000_DEV_ID_82543GC_FIBER:
 	case E1000_DEV_ID_82543GC_COPPER:
@@ -1693,23 +1692,23 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
-		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
 			return wol->wolopts ? -EOPNOTSUPP : 0;
 		/* Fall Through */
 
 	default:
-		if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
 			return -EOPNOTSUPP;
 
 		adapter->wol = 0;
 
-		if(wol->wolopts & WAKE_UCAST)
+		if (wol->wolopts & WAKE_UCAST)
 			adapter->wol |= E1000_WUFC_EX;
-		if(wol->wolopts & WAKE_MCAST)
+		if (wol->wolopts & WAKE_MCAST)
 			adapter->wol |= E1000_WUFC_MC;
-		if(wol->wolopts & WAKE_BCAST)
+		if (wol->wolopts & WAKE_BCAST)
 			adapter->wol |= E1000_WUFC_BC;
-		if(wol->wolopts & WAKE_MAGIC)
+		if (wol->wolopts & WAKE_MAGIC)
 			adapter->wol |= E1000_WUFC_MAG;
 	}
 
@@ -1727,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
 
-	if(test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+	if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
 		e1000_led_off(&adapter->hw);
 	else
 		e1000_led_on(&adapter->hw);
@@ -1740,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
+	if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
 		data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
 
-	if(adapter->hw.mac_type < e1000_82571) {
-		if(!adapter->blink_timer.function) {
+	if (adapter->hw.mac_type < e1000_82571) {
+		if (!adapter->blink_timer.function) {
 			init_timer(&adapter->blink_timer);
 			adapter->blink_timer.function = e1000_led_blink_callback;
 			adapter->blink_timer.data = (unsigned long) adapter;
@@ -1782,21 +1781,21 @@ static int
 e1000_nway_reset(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	if(netif_running(netdev)) {
+	if (netif_running(netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
 	}
 	return 0;
 }
 
-static int 
+static int
 e1000_get_stats_count(struct net_device *netdev)
 {
 	return E1000_STATS_LEN;
 }
 
-static void 
-e1000_get_ethtool_stats(struct net_device *netdev, 
+static void
+e1000_get_ethtool_stats(struct net_device *netdev,
 		struct ethtool_stats *stats, uint64_t *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1830,7 +1829,7 @@ e1000_get_ethtool_stats(struct net_device *netdev,
 /*	BUG_ON(i != E1000_STATS_LEN); */
 }
 
-static void 
+static void
 e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 {
 #ifdef CONFIG_E1000_MQ
@@ -1839,9 +1838,9 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 	uint8_t *p = data;
 	int i;
 
-	switch(stringset) {
+	switch (stringset) {
 	case ETH_SS_TEST:
-		memcpy(data, *e1000_gstrings_test, 
+		memcpy(data, *e1000_gstrings_test,
 			E1000_TEST_LEN*ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:

+ 10 - 8
drivers/net/e1000/e1000_hw.c

@@ -1600,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
     if(ret_val)
         return ret_val;
 
-        /* Read the MII 1000Base-T Control Register (Address 9). */
-        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
-        if(ret_val)
-            return ret_val;
+    /* Read the MII 1000Base-T Control Register (Address 9). */
+    ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+    if(ret_val)
+        return ret_val;
 
     /* Need to parse both autoneg_advertised and fc and set up
      * the appropriate PHY registers.  First we will parse for
@@ -3916,7 +3916,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
         }
     }
 
-    if(eeprom->use_eerd == TRUE) {
+    if (eeprom->use_eerd == TRUE) {
         ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
         if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
             (hw->mac_type != e1000_82573))
@@ -4423,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
             return -E1000_ERR_EEPROM;
         }
 
-	/* If STM opcode located in bits 15:8 of flop, reset firmware */
+        /* If STM opcode located in bits 15:8 of flop, reset firmware */
         if ((flop & 0xFF00) == E1000_STM_OPCODE) {
             E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
         }
@@ -4431,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
         /* Perform the flash update */
         E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
 
-	for (i=0; i < attempts; i++) {
+        for (i=0; i < attempts; i++) {
             eecd = E1000_READ_REG(hw, EECD);
             if ((eecd & E1000_EECD_FLUPD) == 0) {
                 break;
@@ -4504,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
         hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
         hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
     }
+
     switch (hw->mac_type) {
     default:
         break;
@@ -6840,7 +6841,8 @@ int32_t
 e1000_check_phy_reset_block(struct e1000_hw *hw)
 {
     uint32_t manc = 0;
-    if(hw->mac_type > e1000_82547_rev_2)
+
+    if (hw->mac_type > e1000_82547_rev_2)
         manc = E1000_READ_REG(hw, MANC);
     return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
 	    E1000_BLK_PHY_RESET : E1000_SUCCESS;

+ 11 - 9
drivers/net/e1000/e1000_hw.h

@@ -377,6 +377,7 @@ int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
 void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
 
 /* Filters (multicast, vlan, receive) */
+void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
 uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
 void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
 void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -401,7 +402,9 @@ void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
 void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
 /* Port I/O is only supported on 82544 and newer */
 uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
+uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
 void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
+void e1000_enable_pciex_master(struct e1000_hw *hw);
 int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
 int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
 void e1000_release_software_semaphore(struct e1000_hw *hw);
@@ -899,14 +902,14 @@ struct e1000_ffvt_entry {
 #define E1000_TXDCTL   0x03828  /* TX Descriptor Control - RW */
 #define E1000_TADV     0x0382C  /* TX Interrupt Absolute Delay Val - RW */
 #define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
-#define E1000_TARC0    0x03840 /* TX Arbitration Count (0) */
-#define E1000_TDBAL1   0x03900 /* TX Desc Base Address Low (1) - RW */
-#define E1000_TDBAH1   0x03904 /* TX Desc Base Address High (1) - RW */
-#define E1000_TDLEN1   0x03908 /* TX Desc Length (1) - RW */
-#define E1000_TDH1     0x03910 /* TX Desc Head (1) - RW */
-#define E1000_TDT1     0x03918 /* TX Desc Tail (1) - RW */
-#define E1000_TXDCTL1  0x03928 /* TX Descriptor Control (1) - RW */
-#define E1000_TARC1    0x03940 /* TX Arbitration Count (1) */
+#define E1000_TARC0    0x03840  /* TX Arbitration Count (0) */
+#define E1000_TDBAL1   0x03900  /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1   0x03904  /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1   0x03908  /* TX Desc Length (1) - RW */
+#define E1000_TDH1     0x03910  /* TX Desc Head (1) - RW */
+#define E1000_TDT1     0x03918  /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1  0x03928  /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1    0x03940  /* TX Arbitration Count (1) */
 #define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
 #define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
 #define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
@@ -1761,7 +1764,6 @@ struct e1000_hw {
 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
 #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
                                               still to be processed. */
-
 /* Transmit Configuration Word */
 #define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
 #define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */

+ 478 - 324
drivers/net/e1000/e1000_main.c

@@ -29,11 +29,71 @@
 #include "e1000.h"
 
 /* Change Log
- * 6.0.58       4/20/05
- *   o Accepted ethtool cleanup patch from Stephen Hemminger 
- * 6.0.44+	2/15/05
- *   o applied Anton's patch to resolve tx hang in hardware
- *   o Applied Andrew Mortons patch - e1000 stops working after resume
+ * 6.3.9	12/16/2005
+ *   o incorporate fix for recycled skbs from IBM LTC
+ * 6.3.7	11/18/2005
+ *   o Honor eeprom setting for enabling/disabling Wake On Lan
+ * 6.3.5 	11/17/2005
+ *   o Fix memory leak in rx ring handling for PCI Express adapters
+ * 6.3.4	11/8/05
+ *   o Patch from Jesper Juhl to remove redundant NULL checks for kfree
+ * 6.3.2	9/20/05
+ *   o Render logic that sets/resets DRV_LOAD as inline functions to 
+ *     avoid code replication. If f/w is AMT then set DRV_LOAD only when
+ *     network interface is open.
+ *   o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
+ *   o Adjust PBA partioning for Jumbo frames using MTU size and not
+ *     rx_buffer_len
+ * 6.3.1	9/19/05
+ *   o Use adapter->tx_timeout_factor in Tx Hung Detect logic 
+       (e1000_clean_tx_irq)
+ *   o Support for 8086:10B5 device (Quad Port)
+ * 6.2.14	9/15/05
+ *   o In AMT enabled configurations, set/reset DRV_LOAD bit on interface 
+ *     open/close 
+ * 6.2.13       9/14/05
+ *   o Invoke e1000_check_mng_mode only for 8257x controllers since it 
+ *     accesses the FWSM that is not supported in other controllers
+ * 6.2.12       9/9/05
+ *   o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
+ *   o set RCTL:SECRC only for controllers newer than 82543. 
+ *   o When the n/w interface comes down reset DRV_LOAD bit to notify f/w.
+ *     This code was moved from e1000_remove to e1000_close
+ * 6.2.10       9/6/05
+ *   o Fix error in updating RDT in el1000_alloc_rx_buffers[_ps] -- one off.
+ *   o Enable fc by default on 82573 controllers (do not read eeprom)
+ *   o Fix rx_errors statistic not to include missed_packet_count
+ *   o Fix rx_dropped statistic not to include missed_packet_count 
+       (Padraig Brady)
+ * 6.2.9        8/30/05
+ *   o Remove call to update statistics from the controller ib e1000_get_stats
+ * 6.2.8        8/30/05
+ *   o Improved algorithm for rx buffer allocation/rdt update
+ *   o Flow control watermarks relative to rx PBA size
+ *   o Simplified 'Tx Hung' detect logic
+ * 6.2.7 	8/17/05
+ *   o Report rx buffer allocation failures and tx timeout counts in stats
+ * 6.2.6 	8/16/05
+ *   o Implement workaround for controller erratum -- linear non-tso packet
+ *     following a TSO gets written back prematurely
+ * 6.2.5	8/15/05
+ *   o Set netdev->tx_queue_len based on link speed/duplex settings.
+ *   o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
+ *   o Do not power off PHY if SoL/IDER session is active
+ * 6.2.4	8/10/05
+ *   o Fix loopback test setup/cleanup for 82571/3 controllers
+ *   o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
+ *     all packets as raw
+ *   o Prevent operations that will cause the PHY to be reset if SoL/IDER
+ *     sessions are active and log a message
+ * 6.2.2	7/21/05
+ *   o used fixed size descriptors for all MTU sizes, reduces memory load
+ * 6.1.2	4/13/05
+ *   o Fixed ethtool diagnostics
+ *   o Enabled flow control to take default eeprom settings
+ *   o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
+ *     calls, one from mii_ioctl and other from within update_stats while 
+ *     processing MIIREG ioctl.
  */
 
 char e1000_driver_name[] = "e1000";
@@ -295,7 +355,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
 static inline void
 e1000_irq_enable(struct e1000_adapter *adapter)
 {
-	if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
+	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
 		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
 		E1000_WRITE_FLUSH(&adapter->hw);
 	}
@@ -307,17 +367,17 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
 	uint16_t old_vid = adapter->mng_vlan_id;
-	if(adapter->vlgrp) {
-		if(!adapter->vlgrp->vlan_devices[vid]) {
-			if(adapter->hw.mng_cookie.status &
+	if (adapter->vlgrp) {
+		if (!adapter->vlgrp->vlan_devices[vid]) {
+			if (adapter->hw.mng_cookie.status &
 				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 				e1000_vlan_rx_add_vid(netdev, vid);
 				adapter->mng_vlan_id = vid;
 			} else
 				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-				
-			if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
-					(vid != old_vid) && 
+
+			if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
+					(vid != old_vid) &&
 					!adapter->vlgrp->vlan_devices[old_vid])
 				e1000_vlan_rx_kill_vid(netdev, old_vid);
 		}
@@ -401,10 +461,10 @@ e1000_up(struct e1000_adapter *adapter)
 	/* hardware has been reset, we need to reload some things */
 
 	/* Reset the PHY if it was previously powered down */
-	if(adapter->hw.media_type == e1000_media_type_copper) {
+	if (adapter->hw.media_type == e1000_media_type_copper) {
 		uint16_t mii_reg;
 		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
-		if(mii_reg & MII_CR_POWER_DOWN)
+		if (mii_reg & MII_CR_POWER_DOWN)
 			e1000_phy_reset(&adapter->hw);
 	}
 
@@ -425,16 +485,16 @@ e1000_up(struct e1000_adapter *adapter)
 	}
 
 #ifdef CONFIG_PCI_MSI
-	if(adapter->hw.mac_type > e1000_82547_rev_2) {
+	if (adapter->hw.mac_type > e1000_82547_rev_2) {
 		adapter->have_msi = TRUE;
-		if((err = pci_enable_msi(adapter->pdev))) {
+		if ((err = pci_enable_msi(adapter->pdev))) {
 			DPRINTK(PROBE, ERR,
 			 "Unable to allocate MSI interrupt Error: %d\n", err);
 			adapter->have_msi = FALSE;
 		}
 	}
 #endif
-	if((err = request_irq(adapter->pdev->irq, &e1000_intr,
+	if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
 		              SA_SHIRQ | SA_SAMPLE_RANDOM,
 		              netdev->name, netdev))) {
 		DPRINTK(PROBE, ERR,
@@ -471,7 +531,7 @@ e1000_down(struct e1000_adapter *adapter)
 #endif
 	free_irq(adapter->pdev->irq, netdev);
 #ifdef CONFIG_PCI_MSI
-	if(adapter->hw.mac_type > e1000_82547_rev_2 &&
+	if (adapter->hw.mac_type > e1000_82547_rev_2 &&
 	   adapter->have_msi == TRUE)
 		pci_disable_msi(adapter->pdev);
 #endif
@@ -537,12 +597,12 @@ e1000_reset(struct e1000_adapter *adapter)
 		break;
 	}
 
-	if((adapter->hw.mac_type != e1000_82573) &&
+	if ((adapter->hw.mac_type != e1000_82573) &&
 	   (adapter->netdev->mtu > E1000_RXBUFFER_8192))
 		pba -= 8; /* allocate more FIFO for Tx */
 
 
-	if(adapter->hw.mac_type == e1000_82547) {
+	if (adapter->hw.mac_type == e1000_82547) {
 		adapter->tx_fifo_head = 0;
 		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 		adapter->tx_fifo_size =
@@ -565,9 +625,9 @@ e1000_reset(struct e1000_adapter *adapter)
 
 	/* Allow time for pending master requests to run */
 	e1000_reset_hw(&adapter->hw);
-	if(adapter->hw.mac_type >= e1000_82544)
+	if (adapter->hw.mac_type >= e1000_82544)
 		E1000_WRITE_REG(&adapter->hw, WUC, 0);
-	if(e1000_init_hw(&adapter->hw))
+	if (e1000_init_hw(&adapter->hw))
 		DPRINTK(PROBE, ERR, "Hardware Error\n");
 	e1000_update_mng_vlan(adapter);
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -606,26 +666,26 @@ e1000_probe(struct pci_dev *pdev,
 	int i, err, pci_using_dac;
 	uint16_t eeprom_data;
 	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
-	if((err = pci_enable_device(pdev)))
+	if ((err = pci_enable_device(pdev)))
 		return err;
 
-	if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
 		pci_using_dac = 1;
 	} else {
-		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
 			E1000_ERR("No usable DMA configuration, aborting\n");
 			return err;
 		}
 		pci_using_dac = 0;
 	}
 
-	if((err = pci_request_regions(pdev, e1000_driver_name)))
+	if ((err = pci_request_regions(pdev, e1000_driver_name)))
 		return err;
 
 	pci_set_master(pdev);
 
 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
-	if(!netdev) {
+	if (!netdev) {
 		err = -ENOMEM;
 		goto err_alloc_etherdev;
 	}
@@ -644,15 +704,15 @@ e1000_probe(struct pci_dev *pdev,
 	mmio_len = pci_resource_len(pdev, BAR_0);
 
 	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
-	if(!adapter->hw.hw_addr) {
+	if (!adapter->hw.hw_addr) {
 		err = -EIO;
 		goto err_ioremap;
 	}
 
-	for(i = BAR_1; i <= BAR_5; i++) {
-		if(pci_resource_len(pdev, i) == 0)
+	for (i = BAR_1; i <= BAR_5; i++) {
+		if (pci_resource_len(pdev, i) == 0)
 			continue;
-		if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 			adapter->hw.io_base = pci_resource_start(pdev, i);
 			break;
 		}
@@ -689,13 +749,13 @@ e1000_probe(struct pci_dev *pdev,
 
 	/* setup the private structure */
 
-	if((err = e1000_sw_init(adapter)))
+	if ((err = e1000_sw_init(adapter)))
 		goto err_sw_init;
 
-	if((err = e1000_check_phy_reset_block(&adapter->hw)))
+	if ((err = e1000_check_phy_reset_block(&adapter->hw)))
 		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
-	if(adapter->hw.mac_type >= e1000_82543) {
+	if (adapter->hw.mac_type >= e1000_82543) {
 		netdev->features = NETIF_F_SG |
 				   NETIF_F_HW_CSUM |
 				   NETIF_F_HW_VLAN_TX |
@@ -704,16 +764,16 @@ e1000_probe(struct pci_dev *pdev,
 	}
 
 #ifdef NETIF_F_TSO
-	if((adapter->hw.mac_type >= e1000_82544) &&
+	if ((adapter->hw.mac_type >= e1000_82544) &&
 	   (adapter->hw.mac_type != e1000_82547))
 		netdev->features |= NETIF_F_TSO;
 
 #ifdef NETIF_F_TSO_IPV6
-	if(adapter->hw.mac_type > e1000_82547_rev_2)
+	if (adapter->hw.mac_type > e1000_82547_rev_2)
 		netdev->features |= NETIF_F_TSO_IPV6;
 #endif
 #endif
-	if(pci_using_dac)
+	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
  	/* hard_start_xmit is safe against parallel locking */
@@ -721,14 +781,14 @@ e1000_probe(struct pci_dev *pdev,
  
 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
 
-	/* before reading the EEPROM, reset the controller to 
+	/* before reading the EEPROM, reset the controller to
 	 * put the device in a known good starting state */
-	
+
 	e1000_reset_hw(&adapter->hw);
 
 	/* make sure the EEPROM is good */
 
-	if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
+	if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
 		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -736,12 +796,12 @@ e1000_probe(struct pci_dev *pdev,
 
 	/* copy the MAC address out of the EEPROM */
 
-	if(e1000_read_mac_addr(&adapter->hw))
+	if (e1000_read_mac_addr(&adapter->hw))
 		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
 	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
 	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
 
-	if(!is_valid_ether_addr(netdev->perm_addr)) {
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
 		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -781,7 +841,7 @@ e1000_probe(struct pci_dev *pdev,
 	 * enable the ACPI Magic Packet filter
 	 */
 
-	switch(adapter->hw.mac_type) {
+	switch (adapter->hw.mac_type) {
 	case e1000_82542_rev2_0:
 	case e1000_82542_rev2_1:
 	case e1000_82543:
@@ -794,7 +854,7 @@ e1000_probe(struct pci_dev *pdev,
 	case e1000_82546:
 	case e1000_82546_rev_3:
 	case e1000_82571:
-		if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
+		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
 			e1000_read_eeprom(&adapter->hw,
 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
 			break;
@@ -805,7 +865,7 @@ e1000_probe(struct pci_dev *pdev,
 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
 		break;
 	}
-	if(eeprom_data & eeprom_apme_mask)
+	if (eeprom_data & eeprom_apme_mask)
 		adapter->wol |= E1000_WUFC_MAG;
 
 	/* print bus type/speed/width info */
@@ -840,7 +900,7 @@ e1000_probe(struct pci_dev *pdev,
 		e1000_get_hw_control(adapter);
 
 	strcpy(netdev->name, "eth%d");
-	if((err = register_netdev(netdev)))
+	if ((err = register_netdev(netdev)))
 		goto err_register;
 
 	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -881,10 +941,10 @@ e1000_remove(struct pci_dev *pdev)
 
 	flush_scheduled_work();
 
-	if(adapter->hw.mac_type >= e1000_82540 &&
+	if (adapter->hw.mac_type >= e1000_82540 &&
 	   adapter->hw.media_type == e1000_media_type_copper) {
 		manc = E1000_READ_REG(&adapter->hw, MANC);
-		if(manc & E1000_MANC_SMBUS_EN) {
+		if (manc & E1000_MANC_SMBUS_EN) {
 			manc |= E1000_MANC_ARP_EN;
 			E1000_WRITE_REG(&adapter->hw, MANC, manc);
 		}
@@ -900,7 +960,7 @@ e1000_remove(struct pci_dev *pdev)
 		__dev_put(&adapter->polling_netdev[i]);
 #endif
 
-	if(!e1000_check_phy_reset_block(&adapter->hw))
+	if (!e1000_check_phy_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
 
 	kfree(adapter->tx_ring);
@@ -959,19 +1019,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
 	/* identify the MAC */
 
-	if(e1000_set_mac_type(hw)) {
+	if (e1000_set_mac_type(hw)) {
 		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
 		return -EIO;
 	}
 
 	/* initialize eeprom parameters */
 
-	if(e1000_init_eeprom_params(hw)) {
+	if (e1000_init_eeprom_params(hw)) {
 		E1000_ERR("EEPROM initialization failed\n");
 		return -EIO;
 	}
 
-	switch(hw->mac_type) {
+	switch (hw->mac_type) {
 	default:
 		break;
 	case e1000_82541:
@@ -990,7 +1050,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
 	/* Copper options */
 
-	if(hw->media_type == e1000_media_type_copper) {
+	if (hw->media_type == e1000_media_type_copper) {
 		hw->mdix = AUTO_ALL_MODES;
 		hw->disable_polarity_correction = FALSE;
 		hw->master_slave = E1000_MASTER_SLAVE;
@@ -1166,10 +1226,10 @@ e1000_open(struct net_device *netdev)
 	if ((err = e1000_setup_all_rx_resources(adapter)))
 		goto err_setup_rx;
 
-	if((err = e1000_up(adapter)))
+	if ((err = e1000_up(adapter)))
 		goto err_up;
 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-	if((adapter->hw.mng_cookie.status &
+	if ((adapter->hw.mng_cookie.status &
 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
 		e1000_update_mng_vlan(adapter);
 	}
@@ -1214,7 +1274,7 @@ e1000_close(struct net_device *netdev)
 	e1000_free_all_tx_resources(adapter);
 	e1000_free_all_rx_resources(adapter);
 
-	if((adapter->hw.mng_cookie.status &
+	if ((adapter->hw.mng_cookie.status &
 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 	}
@@ -1269,7 +1329,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
 	size = sizeof(struct e1000_buffer) * txdr->count;
 
 	txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
-	if(!txdr->buffer_info) {
+	if (!txdr->buffer_info) {
 		DPRINTK(PROBE, ERR,
 		"Unable to allocate memory for the transmit descriptor ring\n");
 		return -ENOMEM;
@@ -1282,7 +1342,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
 	E1000_ROUNDUP(txdr->size, 4096);
 
 	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
-	if(!txdr->desc) {
+	if (!txdr->desc) {
 setup_tx_desc_die:
 		vfree(txdr->buffer_info);
 		DPRINTK(PROBE, ERR,
@@ -1298,8 +1358,8 @@ setup_tx_desc_die:
 				     "at %p\n", txdr->size, txdr->desc);
 		/* Try again, without freeing the previous */
 		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
-		if(!txdr->desc) {
 		/* Failed allocation, critical failure */
+		if (!txdr->desc) {
 			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
 			goto setup_tx_desc_die;
 		}
@@ -1499,7 +1559,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
 
 	size = sizeof(struct e1000_ps_page) * rxdr->count;
 	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
-	if(!rxdr->ps_page) {
+	if (!rxdr->ps_page) {
 		vfree(rxdr->buffer_info);
 		DPRINTK(PROBE, ERR,
 		"Unable to allocate memory for the receive descriptor ring\n");
@@ -1509,7 +1569,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
 
 	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
 	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
-	if(!rxdr->ps_page_dma) {
+	if (!rxdr->ps_page_dma) {
 		vfree(rxdr->buffer_info);
 		kfree(rxdr->ps_page);
 		DPRINTK(PROBE, ERR,
@@ -1518,7 +1578,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	}
 	memset(rxdr->ps_page_dma, 0, size);
 
-	if(adapter->hw.mac_type <= e1000_82547_rev_2)
+	if (adapter->hw.mac_type <= e1000_82547_rev_2)
 		desc_len = sizeof(struct e1000_rx_desc);
 	else
 		desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1621,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 {
 	uint32_t rctl, rfctl;
 	uint32_t psrctl = 0;
-#ifdef CONFIG_E1000_PACKET_SPLIT
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
 	uint32_t pages = 0;
 #endif
 
@@ -1647,32 +1707,17 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 		rctl |= E1000_RCTL_LPE;
 
 	/* Setup buffer sizes */
-	if(adapter->hw.mac_type >= e1000_82571) {
+	if (adapter->hw.mac_type >= e1000_82571) {
 		/* We can now specify buffers in 1K increments.
 		 * BSIZE and BSEX are ignored in this case. */
 		rctl |= adapter->rx_buffer_len << 0x11;
 	} else {
 		rctl &= ~E1000_RCTL_SZ_4096;
-		rctl |= E1000_RCTL_BSEX; 
-		switch (adapter->rx_buffer_len) {
-		case E1000_RXBUFFER_2048:
-		default:
-			rctl |= E1000_RCTL_SZ_2048;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
-		case E1000_RXBUFFER_4096:
-			rctl |= E1000_RCTL_SZ_4096;
-			break;
-		case E1000_RXBUFFER_8192:
-			rctl |= E1000_RCTL_SZ_8192;
-			break;
-		case E1000_RXBUFFER_16384:
-			rctl |= E1000_RCTL_SZ_16384;
-			break;
-		}
+		rctl &= ~E1000_RCTL_BSEX;
+		rctl |= E1000_RCTL_SZ_2048;
 	}
 
-#ifdef CONFIG_E1000_PACKET_SPLIT
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
 	/* 82571 and greater support packet-split where the protocol
 	 * header is placed in skb->data and the packet data is
 	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
@@ -1696,7 +1741,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
 
 		rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
-		
+
 		psrctl |= adapter->rx_ps_bsize0 >>
 			E1000_PSRCTL_BSIZE0_SHIFT;
 
@@ -1758,7 +1803,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 
 	if (hw->mac_type >= e1000_82540) {
 		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
-		if(adapter->itr > 1)
+		if (adapter->itr > 1)
 			E1000_WRITE_REG(hw, ITR,
 				1000000000 / (adapter->itr * 256));
 	}
@@ -1847,13 +1892,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
 	if (hw->mac_type >= e1000_82543) {
 		rxcsum = E1000_READ_REG(hw, RXCSUM);
-		if(adapter->rx_csum == TRUE) {
+		if (adapter->rx_csum == TRUE) {
 			rxcsum |= E1000_RXCSUM_TUOFL;
 
 			/* Enable 82571 IPv4 payload checksum for UDP fragments
 			 * Must be used in conjunction with packet-split. */
-			if ((hw->mac_type >= e1000_82571) && 
-			   (adapter->rx_ps_pages)) {
+			if ((hw->mac_type >= e1000_82571) &&
+			    (adapter->rx_ps_pages)) {
 				rxcsum |= E1000_RXCSUM_IPPCSE;
 			}
 		} else {
@@ -1915,7 +1960,7 @@ static inline void
 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 			struct e1000_buffer *buffer_info)
 {
-	if(buffer_info->dma) {
+	if (buffer_info->dma) {
 		pci_unmap_page(adapter->pdev,
 				buffer_info->dma,
 				buffer_info->length,
@@ -1942,7 +1987,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
 
 	/* Free all the Tx ring sk_buffs */
 
-	for(i = 0; i < tx_ring->count; i++) {
+	for (i = 0; i < tx_ring->count; i++) {
 		buffer_info = &tx_ring->buffer_info[i];
 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
 	}
@@ -2038,10 +2083,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
 	unsigned int i, j;
 
 	/* Free all the Rx ring sk_buffs */
-
-	for(i = 0; i < rx_ring->count; i++) {
+	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
-		if(buffer_info->skb) {
+		if (buffer_info->skb) {
 			pci_unmap_single(pdev,
 					 buffer_info->dma,
 					 buffer_info->length,
@@ -2122,7 +2166,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
 	E1000_WRITE_FLUSH(&adapter->hw);
 	mdelay(5);
 
-	if(netif_running(netdev))
+	if (netif_running(netdev))
 		e1000_clean_all_rx_rings(adapter);
 }
 
@@ -2138,13 +2182,13 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
 	E1000_WRITE_FLUSH(&adapter->hw);
 	mdelay(5);
 
-	if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
+	if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
 		e1000_pci_set_mwi(&adapter->hw);
 
-	if(netif_running(netdev)) {
-		e1000_configure_rx(adapter);
+	if (netif_running(netdev)) {
 		/* No need to loop, because 82542 supports only 1 queue */
 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+		e1000_configure_rx(adapter);
 		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
 	}
 }
@@ -2163,12 +2207,12 @@ e1000_set_mac(struct net_device *netdev, void *p)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct sockaddr *addr = p;
 
-	if(!is_valid_ether_addr(addr->sa_data))
+	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	/* 82542 2.0 needs to be in reset to write receive address registers */
 
-	if(adapter->hw.mac_type == e1000_82542_rev2_0)
+	if (adapter->hw.mac_type == e1000_82542_rev2_0)
 		e1000_enter_82542_rst(adapter);
 
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -2182,17 +2226,17 @@ e1000_set_mac(struct net_device *netdev, void *p)
 		/* activate the work around */
 		adapter->hw.laa_is_present = 1;
 
-		/* Hold a copy of the LAA in RAR[14] This is done so that 
-		 * between the time RAR[0] gets clobbered  and the time it 
-		 * gets fixed (in e1000_watchdog), the actual LAA is in one 
+		/* Hold a copy of the LAA in RAR[14] This is done so that
+		 * between the time RAR[0] gets clobbered  and the time it
+		 * gets fixed (in e1000_watchdog), the actual LAA is in one
 		 * of the RARs and no incoming packets directed to this port
-		 * are dropped. Eventaully the LAA will be in RAR[0] and 
+		 * are dropped. Eventaully the LAA will be in RAR[0] and
 		 * RAR[14] */
-		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 
+		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
 					E1000_RAR_ENTRIES - 1);
 	}
 
-	if(adapter->hw.mac_type == e1000_82542_rev2_0)
+	if (adapter->hw.mac_type == e1000_82542_rev2_0)
 		e1000_leave_82542_rst(adapter);
 
 	return 0;
@@ -2226,9 +2270,9 @@ e1000_set_multi(struct net_device *netdev)
 
 	rctl = E1000_READ_REG(hw, RCTL);
 
-	if(netdev->flags & IFF_PROMISC) {
+	if (netdev->flags & IFF_PROMISC) {
 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-	} else if(netdev->flags & IFF_ALLMULTI) {
+	} else if (netdev->flags & IFF_ALLMULTI) {
 		rctl |= E1000_RCTL_MPE;
 		rctl &= ~E1000_RCTL_UPE;
 	} else {
@@ -2239,7 +2283,7 @@ e1000_set_multi(struct net_device *netdev)
 
 	/* 82542 2.0 needs to be in reset to write receive address registers */
 
-	if(hw->mac_type == e1000_82542_rev2_0)
+	if (hw->mac_type == e1000_82542_rev2_0)
 		e1000_enter_82542_rst(adapter);
 
 	/* load the first 14 multicast address into the exact filters 1-14
@@ -2249,7 +2293,7 @@ e1000_set_multi(struct net_device *netdev)
 	 */
 	mc_ptr = netdev->mc_list;
 
-	for(i = 1; i < rar_entries; i++) {
+	for (i = 1; i < rar_entries; i++) {
 		if (mc_ptr) {
 			e1000_rar_set(hw, mc_ptr->dmi_addr, i);
 			mc_ptr = mc_ptr->next;
@@ -2261,17 +2305,17 @@ e1000_set_multi(struct net_device *netdev)
 
 	/* clear the old settings from the multicast hash table */
 
-	for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
+	for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
 		E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
 
 	/* load any remaining addresses into the hash table */
 
-	for(; mc_ptr; mc_ptr = mc_ptr->next) {
+	for (; mc_ptr; mc_ptr = mc_ptr->next) {
 		hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
 		e1000_mta_set(hw, hash_value);
 	}
 
-	if(hw->mac_type == e1000_82542_rev2_0)
+	if (hw->mac_type == e1000_82542_rev2_0)
 		e1000_leave_82542_rst(adapter);
 }
 
@@ -2297,8 +2341,8 @@ e1000_82547_tx_fifo_stall(unsigned long data)
 	struct net_device *netdev = adapter->netdev;
 	uint32_t tctl;
 
-	if(atomic_read(&adapter->tx_fifo_stall)) {
-		if((E1000_READ_REG(&adapter->hw, TDT) ==
+	if (atomic_read(&adapter->tx_fifo_stall)) {
+		if ((E1000_READ_REG(&adapter->hw, TDT) ==
 		    E1000_READ_REG(&adapter->hw, TDH)) &&
 		   (E1000_READ_REG(&adapter->hw, TDFT) ==
 		    E1000_READ_REG(&adapter->hw, TDFH)) &&
@@ -2350,18 +2394,18 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 	e1000_check_for_link(&adapter->hw);
 	if (adapter->hw.mac_type == e1000_82573) {
 		e1000_enable_tx_pkt_filtering(&adapter->hw);
-		if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
+		if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
 			e1000_update_mng_vlan(adapter);
-	}	
+	}
 
-	if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
+	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
 	   !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
 		link = !adapter->hw.serdes_link_down;
 	else
 		link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
 
-	if(link) {
-		if(!netif_carrier_ok(netdev)) {
+	if (link) {
+		if (!netif_carrier_ok(netdev)) {
 			e1000_get_speed_and_duplex(&adapter->hw,
 			                           &adapter->link_speed,
 			                           &adapter->link_duplex);
@@ -2392,7 +2436,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 			adapter->smartspeed = 0;
 		}
 	} else {
-		if(netif_carrier_ok(netdev)) {
+		if (netif_carrier_ok(netdev)) {
 			adapter->link_speed = 0;
 			adapter->link_duplex = 0;
 			DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -2432,12 +2476,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 	}
 
 	/* Dynamic mode for Interrupt Throttle Rate (ITR) */
-	if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
+	if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
 		/* Symmetric Tx/Rx gets a reduced ITR=2000; Total
 		 * asymmetrical Tx or Rx gets ITR=8000; everyone
 		 * else is between 2000-8000. */
 		uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
-		uint32_t dif = (adapter->gotcl > adapter->gorcl ? 
+		uint32_t dif = (adapter->gotcl > adapter->gorcl ?
 			adapter->gotcl - adapter->gorcl :
 			adapter->gorcl - adapter->gotcl) / 10000;
 		uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
@@ -2450,7 +2494,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = TRUE;
 
-	/* With 82571 controllers, LAA may be overwritten due to controller 
+	/* With 82571 controllers, LAA may be overwritten due to controller
 	 * reset from the other port. Set the appropriate LAA in RAR[0] */
 	if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
 		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
@@ -2479,7 +2523,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
 	int err;
 
-	if(skb_shinfo(skb)->tso_size) {
+	if (skb_shinfo(skb)->tso_size) {
 		if (skb_header_cloned(skb)) {
 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 			if (err)
@@ -2488,7 +2532,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
 		mss = skb_shinfo(skb)->tso_size;
-		if(skb->protocol == ntohs(ETH_P_IP)) {
+		if (skb->protocol == ntohs(ETH_P_IP)) {
 			skb->nh.iph->tot_len = 0;
 			skb->nh.iph->check = 0;
 			skb->h.th->check =
@@ -2500,7 +2544,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 			cmd_length = E1000_TXD_CMD_IP;
 			ipcse = skb->h.raw - skb->data - 1;
 #ifdef NETIF_F_TSO_IPV6
-		} else if(skb->protocol == ntohs(ETH_P_IPV6)) {
+		} else if (skb->protocol == ntohs(ETH_P_IPV6)) {
 			skb->nh.ipv6h->payload_len = 0;
 			skb->h.th->check =
 				~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -2555,7 +2599,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 	unsigned int i;
 	uint8_t css;
 
-	if(likely(skb->ip_summed == CHECKSUM_HW)) {
+	if (likely(skb->ip_summed == CHECKSUM_HW)) {
 		css = skb->h.raw - skb->data;
 
 		i = tx_ring->next_to_use;
@@ -2595,7 +2639,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
 	i = tx_ring->next_to_use;
 
-	while(len) {
+	while (len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, max_per_txd);
 #ifdef NETIF_F_TSO
@@ -2611,7 +2655,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
 		/* Workaround for premature desc write-backs
 		 * in TSO mode.  Append 4-byte sentinel desc */
-		if(unlikely(mss && !nr_frags && size == len && size > 8))
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
 			size -= 4;
 #endif
 		/* work-around for errata 10 and it applies
@@ -2619,13 +2663,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		 * The fix is to make sure that the first descriptor of a
 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
 		 */
-		if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+		if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
 		                (size > 2015) && count == 0))
 		        size = 2015;
-                                                                                
+
 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
 		 * terminating buffers within evenly-aligned dwords. */
-		if(unlikely(adapter->pcix_82544 &&
+		if (unlikely(adapter->pcix_82544 &&
 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
 		   size > 4))
 			size -= 4;
@@ -2641,29 +2685,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		len -= size;
 		offset += size;
 		count++;
-		if(unlikely(++i == tx_ring->count)) i = 0;
+		if (unlikely(++i == tx_ring->count)) i = 0;
 	}
 
-	for(f = 0; f < nr_frags; f++) {
+	for (f = 0; f < nr_frags; f++) {
 		struct skb_frag_struct *frag;
 
 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
 		offset = frag->page_offset;
 
-		while(len) {
+		while (len) {
 			buffer_info = &tx_ring->buffer_info[i];
 			size = min(len, max_per_txd);
 #ifdef NETIF_F_TSO
 			/* Workaround for premature desc write-backs
 			 * in TSO mode.  Append 4-byte sentinel desc */
-			if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
 				size -= 4;
 #endif
 			/* Workaround for potential 82544 hang in PCI-X.
 			 * Avoid terminating buffers within evenly-aligned
 			 * dwords. */
-			if(unlikely(adapter->pcix_82544 &&
+			if (unlikely(adapter->pcix_82544 &&
 			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
 			   size > 4))
 				size -= 4;
@@ -2680,7 +2724,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 			len -= size;
 			offset += size;
 			count++;
-			if(unlikely(++i == tx_ring->count)) i = 0;
+			if (unlikely(++i == tx_ring->count)) i = 0;
 		}
 	}
 
@@ -2700,35 +2744,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
 	unsigned int i;
 
-	if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
 		             E1000_TXD_CMD_TSE;
 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
 
-		if(likely(tx_flags & E1000_TX_FLAGS_IPV4))
+		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
 	}
 
-	if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
 	}
 
-	if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
 		txd_lower |= E1000_TXD_CMD_VLE;
 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
 	}
 
 	i = tx_ring->next_to_use;
 
-	while(count--) {
+	while (count--) {
 		buffer_info = &tx_ring->buffer_info[i];
 		tx_desc = E1000_TX_DESC(*tx_ring, i);
 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 		tx_desc->lower.data =
 			cpu_to_le32(txd_lower | buffer_info->length);
 		tx_desc->upper.data = cpu_to_le32(txd_upper);
-		if(unlikely(++i == tx_ring->count)) i = 0;
+		if (unlikely(++i == tx_ring->count)) i = 0;
 	}
 
 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -2763,20 +2807,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
 
 	E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
 
-	if(adapter->link_duplex != HALF_DUPLEX)
+	if (adapter->link_duplex != HALF_DUPLEX)
 		goto no_fifo_stall_required;
 
-	if(atomic_read(&adapter->tx_fifo_stall))
+	if (atomic_read(&adapter->tx_fifo_stall))
 		return 1;
 
-	if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
 		atomic_set(&adapter->tx_fifo_stall, 1);
 		return 1;
 	}
 
 no_fifo_stall_required:
 	adapter->tx_fifo_head += skb_fifo_len;
-	if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
+	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
 	return 0;
 }
@@ -2787,27 +2831,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
 {
 	struct e1000_hw *hw =  &adapter->hw;
 	uint16_t length, offset;
-	if(vlan_tx_tag_present(skb)) {
-		if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+	if (vlan_tx_tag_present(skb)) {
+		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
 			( adapter->hw.mng_cookie.status &
 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
 			return 0;
 	}
- 	if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
+	if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
 		struct ethhdr *eth = (struct ethhdr *) skb->data;
-		if((htons(ETH_P_IP) == eth->h_proto)) {
-			const struct iphdr *ip = 
+		if ((htons(ETH_P_IP) == eth->h_proto)) {
+			const struct iphdr *ip =
 				(struct iphdr *)((uint8_t *)skb->data+14);
-			if(IPPROTO_UDP == ip->protocol) {
-				struct udphdr *udp = 
-					(struct udphdr *)((uint8_t *)ip + 
+			if (IPPROTO_UDP == ip->protocol) {
+				struct udphdr *udp =
+					(struct udphdr *)((uint8_t *)ip +
 						(ip->ihl << 2));
-				if(ntohs(udp->dest) == 67) {
+				if (ntohs(udp->dest) == 67) {
 					offset = (uint8_t *)udp + 8 - skb->data;
 					length = skb->len - offset;
 
 					return e1000_mng_write_dhcp_info(hw,
-							(uint8_t *)udp + 8, 
+							(uint8_t *)udp + 8,
 							length);
 				}
 			}
@@ -2830,7 +2874,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int nr_frags = 0;
 	unsigned int mss = 0;
 	int count = 0;
-	int tso;
+ 	int tso;
 	unsigned int f;
 	len -= skb->data_len;
 
@@ -2853,7 +2897,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	 * 4 = ceil(buffer len/mss).  To make sure we don't
 	 * overrun the FIFO, adjust the max buffer len if mss
 	 * drops. */
-	if(mss) {
+	if (mss) {
 		uint8_t hdr_len;
 		max_per_txd = min(mss << 2, max_per_txd);
 		max_txd_pwr = fls(max_per_txd) - 1;
@@ -2876,12 +2920,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		}
 	}
 
-	if((mss) || (skb->ip_summed == CHECKSUM_HW))
 	/* reserve a descriptor for the offload context */
+	if ((mss) || (skb->ip_summed == CHECKSUM_HW))
 		count++;
 	count++;
 #else
-	if(skb->ip_summed == CHECKSUM_HW)
+	if (skb->ip_summed == CHECKSUM_HW)
 		count++;
 #endif
 
@@ -2894,24 +2938,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	count += TXD_USE_COUNT(len, max_txd_pwr);
 
-	if(adapter->pcix_82544)
+	if (adapter->pcix_82544)
 		count++;
 
-	/* work-around for errata 10 and it applies to all controllers 
+	/* work-around for errata 10 and it applies to all controllers
 	 * in PCI-X mode, so add one more descriptor to the count
 	 */
-	if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+	if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
 			(len > 2015)))
 		count++;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	for(f = 0; f < nr_frags; f++)
+	for (f = 0; f < nr_frags; f++)
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
 				       max_txd_pwr);
-	if(adapter->pcix_82544)
+	if (adapter->pcix_82544)
 		count += nr_frags;
 
-	if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
+	if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
 		e1000_transfer_dhcp_info(adapter, skb);
 
 	local_irq_save(flags);
@@ -2929,8 +2973,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}
 
-	if(unlikely(adapter->hw.mac_type == e1000_82547)) {
-		if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+	if (unlikely(adapter->hw.mac_type == e1000_82547)) {
+		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
 			netif_stop_queue(netdev);
 			mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
 			spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -2938,13 +2982,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		}
 	}
 
-	if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+	if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
 		tx_flags |= E1000_TX_FLAGS_VLAN;
 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
 	}
 
 	first = tx_ring->next_to_use;
-	
+
 	tso = e1000_tso(adapter, tx_ring, skb);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
@@ -3033,9 +3077,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 
-	if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
-		(max_frame > MAX_JUMBO_FRAME_SIZE)) {
-			DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
 
@@ -3083,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 
 	netdev->mtu = new_mtu;
 
-	if(netif_running(netdev)) {
+	if (netif_running(netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
 	}
@@ -3170,7 +3214,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	hw->collision_delta = E1000_READ_REG(hw, COLC);
 	adapter->stats.colc += hw->collision_delta;
 
-	if(hw->mac_type >= e1000_82543) {
+	if (hw->mac_type >= e1000_82543) {
 		adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
 		adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
 		adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
@@ -3178,7 +3222,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
 		adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
 		adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
 	}
-	if(hw->mac_type > e1000_82547_rev_2) {
+	if (hw->mac_type > e1000_82547_rev_2) {
 		adapter->stats.iac += E1000_READ_REG(hw, IAC);
 		adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
 		adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
@@ -3222,14 +3266,14 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
 	/* Phy Stats */
 
-	if(hw->media_type == e1000_media_type_copper) {
-		if((adapter->link_speed == SPEED_1000) &&
+	if (hw->media_type == e1000_media_type_copper) {
+		if ((adapter->link_speed == SPEED_1000) &&
 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
 			adapter->phy_stats.idle_errors += phy_tmp;
 		}
 
-		if((hw->mac_type <= e1000_82546) &&
+		if ((hw->mac_type <= e1000_82546) &&
 		   (hw->phy_type == e1000_phy_m88) &&
 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
 			adapter->phy_stats.receive_errors += phy_tmp;
@@ -3294,7 +3338,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 		return IRQ_NONE;  /* Not our interrupt */
 	}
 
-	if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
 		hw->get_link_status = 1;
 		mod_timer(&adapter->watchdog_timer, jiffies);
 	}
@@ -3326,26 +3370,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 
 #else /* if !CONFIG_E1000_NAPI */
 	/* Writing IMC and IMS is needed for 82547.
-	   Due to Hub Link bus being occupied, an interrupt
-	   de-assertion message is not able to be sent.
-	   When an interrupt assertion message is generated later,
-	   two messages are re-ordered and sent out.
-	   That causes APIC to think 82547 is in de-assertion
-	   state, while 82547 is in assertion state, resulting
-	   in dead lock. Writing IMC forces 82547 into
-	   de-assertion state.
-	*/
-	if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
+	 * Due to Hub Link bus being occupied, an interrupt
+	 * de-assertion message is not able to be sent.
+	 * When an interrupt assertion message is generated later,
+	 * two messages are re-ordered and sent out.
+	 * That causes APIC to think 82547 is in de-assertion
+	 * state, while 82547 is in assertion state, resulting
+	 * in dead lock. Writing IMC forces 82547 into
+	 * de-assertion state.
+	 */
+	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
 		atomic_inc(&adapter->irq_sem);
 		E1000_WRITE_REG(hw, IMC, ~0);
 	}
 
-	for(i = 0; i < E1000_MAX_INTR; i++)
-		if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+	for (i = 0; i < E1000_MAX_INTR; i++)
+		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
 		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
 			break;
 
-	if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
+	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
 		e1000_irq_enable(adapter);
 
 #endif /* CONFIG_E1000_NAPI */
@@ -3397,9 +3441,9 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
 	*budget -= work_done;
 	poll_dev->quota -= work_done;
-	
+
 	/* If no Tx and not enough Rx work done, exit the polling mode */
-	if((!tx_cleaned && (work_done == 0)) ||
+	if ((!tx_cleaned && (work_done == 0)) ||
 	   !netif_running(adapter->netdev)) {
 quit_polling:
 		netif_rx_complete(poll_dev);
@@ -3431,7 +3475,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
 
 	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
-		for(cleaned = FALSE; !cleaned; ) {
+		for (cleaned = FALSE; !cleaned; ) {
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
 			cleaned = (i == eop);
@@ -3442,7 +3486,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
 			memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
 
-			if(unlikely(++i == tx_ring->count)) i = 0;
+			if (unlikely(++i == tx_ring->count)) i = 0;
 		}
 
 #ifdef CONFIG_E1000_MQ
@@ -3457,7 +3501,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 
 	spin_lock(&tx_ring->tx_lock);
 
-	if(unlikely(cleaned && netif_queue_stopped(netdev) &&
+	if (unlikely(cleaned && netif_queue_stopped(netdev) &&
 		    netif_carrier_ok(netdev)))
 		netif_wake_queue(netdev);
 
@@ -3519,21 +3563,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* 82543 or newer only */
-	if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
+	if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
 	/* Ignore Checksum bit is set */
-	if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
 	/* TCP/UDP checksum error bit is set */
-	if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
+	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
 		/* let the stack verify checksum errors */
 		adapter->hw_csum_err++;
 		return;
 	}
 	/* TCP/UDP Checksum has not been calculated */
-	if(adapter->hw.mac_type <= e1000_82547_rev_2) {
-		if(!(status & E1000_RXD_STAT_TCPCS))
+	if (adapter->hw.mac_type <= e1000_82547_rev_2) {
+		if (!(status & E1000_RXD_STAT_TCPCS))
 			return;
 	} else {
-		if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
 			return;
 	}
 	/* It must be a TCP or UDP packet with a valid checksum */
@@ -3569,9 +3613,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_rx_desc *rx_desc;
-	struct e1000_buffer *buffer_info;
-	struct sk_buff *skb;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
 	unsigned long flags;
 	uint32_t length;
 	uint8_t last_byte;
@@ -3581,16 +3624,25 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
 
-	while(rx_desc->status & E1000_RXD_STAT_DD) {
-		buffer_info = &rx_ring->buffer_info[i];
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb, *next_skb;
 		u8 status;
 #ifdef CONFIG_E1000_NAPI
-		if(*work_done >= work_to_do)
+		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
 #endif
 		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		next_buffer = &rx_ring->buffer_info[i];
+		next_skb = next_buffer->skb;
+
 		cleaned = TRUE;
 		cleaned_count++;
 		pci_unmap_single(pdev,
@@ -3598,20 +3650,50 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		                 buffer_info->length,
 		                 PCI_DMA_FROMDEVICE);
 
-		skb = buffer_info->skb;
 		length = le16_to_cpu(rx_desc->length);
 
-		if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
-			/* All receives must fit into a single buffer */
-			E1000_DBG("%s: Receive packet consumed multiple"
-				  " buffers\n", netdev->name);
-			dev_kfree_skb_irq(skb);
+		skb_put(skb, length);
+
+		if (!(status & E1000_RXD_STAT_EOP)) {
+			if (!rx_ring->rx_skb_top) {
+				rx_ring->rx_skb_top = skb;
+				rx_ring->rx_skb_top->len = length;
+				rx_ring->rx_skb_prev = skb;
+			} else {
+				if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
+					rx_ring->rx_skb_prev->next = skb;
+					skb->prev = rx_ring->rx_skb_prev;
+				} else {
+					skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
+				}
+				rx_ring->rx_skb_prev = skb;
+				rx_ring->rx_skb_top->data_len += length;
+			}
 			goto next_desc;
+		} else {
+			if (rx_ring->rx_skb_top) {
+				if (skb_shinfo(rx_ring->rx_skb_top)
+							->frag_list) {
+					rx_ring->rx_skb_prev->next = skb;
+					skb->prev = rx_ring->rx_skb_prev;
+				} else
+					skb_shinfo(rx_ring->rx_skb_top)
+							->frag_list = skb;
+
+				rx_ring->rx_skb_top->data_len += length;
+				rx_ring->rx_skb_top->len +=
+					rx_ring->rx_skb_top->data_len;
+
+				skb = rx_ring->rx_skb_top;
+				multi_descriptor = TRUE;
+				rx_ring->rx_skb_top = NULL;
+				rx_ring->rx_skb_prev = NULL;
+			}
 		}
 
-		if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
 			last_byte = *(skb->data + length - 1);
-			if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
+			if (TBI_ACCEPT(&adapter->hw, status,
 			              rx_desc->errors, length, last_byte)) {
 				spin_lock_irqsave(&adapter->stats_lock, flags);
 				e1000_tbi_adjust_stats(&adapter->hw,
@@ -3656,9 +3738,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 				  (uint32_t)(status) |
 				  ((uint32_t)(rx_desc->errors) << 24),
 				  rx_desc->csum, skb);
+
 		skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_E1000_NAPI
-		if(unlikely(adapter->vlgrp &&
+		if (unlikely(adapter->vlgrp &&
 			    (status & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 						 le16_to_cpu(rx_desc->special) &
@@ -3667,8 +3750,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			netif_receive_skb(skb);
 		}
 #else /* CONFIG_E1000_NAPI */
-		if(unlikely(adapter->vlgrp &&
-			    (rx_desc->status & E1000_RXD_STAT_VP))) {
+		if (unlikely(adapter->vlgrp &&
+			    (status & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_rx(skb, adapter->vlgrp,
 					le16_to_cpu(rx_desc->special) &
 					E1000_RXD_SPC_VLAN_MASK);
@@ -3691,6 +3774,8 @@ next_desc:
 			cleaned_count = 0;
 		}
 
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
 	}
 	rx_ring->next_to_clean = i;
 
@@ -3716,13 +3801,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                       struct e1000_rx_ring *rx_ring)
 #endif
 {
-	union e1000_rx_desc_packet_split *rx_desc;
+	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
-	struct e1000_buffer *buffer_info;
+	struct e1000_buffer *buffer_info, *next_buffer;
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *next_skb;
 	unsigned int i, j;
 	uint32_t length, staterr;
 	int cleaned_count = 0;
@@ -3731,39 +3816,44 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+	buffer_info = &rx_ring->buffer_info[i];
 
-	while(staterr & E1000_RXD_STAT_DD) {
-		buffer_info = &rx_ring->buffer_info[i];
+	while (staterr & E1000_RXD_STAT_DD) {
 		ps_page = &rx_ring->ps_page[i];
 		ps_page_dma = &rx_ring->ps_page_dma[i];
 #ifdef CONFIG_E1000_NAPI
-		if(unlikely(*work_done >= work_to_do))
+		if (unlikely(*work_done >= work_to_do))
 			break;
 		(*work_done)++;
 #endif
+		skb = buffer_info->skb;
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+		next_buffer = &rx_ring->buffer_info[i];
+		next_skb = next_buffer->skb;
+
 		cleaned = TRUE;
 		cleaned_count++;
 		pci_unmap_single(pdev, buffer_info->dma,
 				 buffer_info->length,
 				 PCI_DMA_FROMDEVICE);
 
-		skb = buffer_info->skb;
-
-		if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
+		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
 			E1000_DBG("%s: Packet Split buffers didn't pick up"
 				  " the full packet\n", netdev->name);
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
 
-		if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
 
 		length = le16_to_cpu(rx_desc->wb.middle.length0);
 
-		if(unlikely(!length)) {
+		if (unlikely(!length)) {
 			E1000_DBG("%s: Last part of the packet spanning"
 				  " multiple descriptors\n", netdev->name);
 			dev_kfree_skb_irq(skb);
@@ -3773,8 +3863,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		/* Good Receive */
 		skb_put(skb, length);
 
-		for(j = 0; j < adapter->rx_ps_pages; j++) {
-			if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
+		for (j = 0; j < adapter->rx_ps_pages; j++) {
+			if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
 				break;
 
 			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
@@ -3794,15 +3884,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
 		skb->protocol = eth_type_trans(skb, netdev);
 
-		if(likely(rx_desc->wb.upper.header_status &
-			  E1000_RXDPS_HDRSTAT_HDRSP)) {
+		if (likely(rx_desc->wb.upper.header_status &
+			  E1000_RXDPS_HDRSTAT_HDRSP))
 			adapter->rx_hdr_split++;
-#ifdef HAVE_RX_ZERO_COPY
-			skb_shinfo(skb)->zero_copy = TRUE;
-#endif
-	        }
 #ifdef CONFIG_E1000_NAPI
-		if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->wb.middle.vlan) &
 				E1000_RXD_SPC_VLAN_MASK);
@@ -3810,7 +3896,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 			netif_receive_skb(skb);
 		}
 #else /* CONFIG_E1000_NAPI */
-		if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_rx(skb, adapter->vlgrp,
 				le16_to_cpu(rx_desc->wb.middle.vlan) &
 				E1000_RXD_SPC_VLAN_MASK);
@@ -3834,6 +3920,9 @@ next_desc:
 			cleaned_count = 0;
 		}
 
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+
 		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
 	}
 	rx_ring->next_to_clean = i;
@@ -3875,7 +3964,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		}
 
 
-		if(unlikely(!skb)) {
+		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
 			break;
@@ -3940,20 +4029,23 @@ map_skb:
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
-		if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
-			/* Force memory writes to complete before letting h/w
-			 * know there are new descriptors to fetch.  (Only
-			 * applicable for weak-ordered memory model archs,
-			 * such as IA-64). */
-			wmb();
-			writel(i, adapter->hw.hw_addr + rx_ring->rdt);
-		}
-
-		if(unlikely(++i == rx_ring->count)) i = 0;
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 	}
 
-	rx_ring->next_to_use = i;
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+	}
 }
 
 /**
@@ -3983,13 +4075,15 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
-		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
 			if (j < adapter->rx_ps_pages) {
 				if (likely(!ps_page->ps_page[j])) {
 					ps_page->ps_page[j] =
 						alloc_page(GFP_ATOMIC);
-					if (unlikely(!ps_page->ps_page[j]))
+					if (unlikely(!ps_page->ps_page[j])) {
+						adapter->alloc_rx_buff_failed++;
 						goto no_buffers;
+					}
 					ps_page_dma->ps_page_dma[j] =
 						pci_map_page(pdev,
 							    ps_page->ps_page[j],
@@ -3997,7 +4091,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 							    PCI_DMA_FROMDEVICE);
 				}
 				/* Refresh the desc even if buffer_addrs didn't
-				 * change because each write-back erases 
+				 * change because each write-back erases
 				 * this info.
 				 */
 				rx_desc->read.buffer_addr[j+1] =
@@ -4008,8 +4102,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
 		skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
-		if(unlikely(!skb))
+		if (unlikely(!skb)) {
+			adapter->alloc_rx_buff_failed++;
 			break;
+		}
 
 		/* Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
@@ -4027,27 +4123,28 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
 		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
-		if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
-			/* Force memory writes to complete before letting h/w
-			 * know there are new descriptors to fetch.  (Only
-			 * applicable for weak-ordered memory model archs,
-			 * such as IA-64). */
-			wmb();
-			/* Hardware increments by 16 bytes, but packet split
-			 * descriptors are 32 bytes...so we increment tail
-			 * twice as much.
-			 */
-			writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
-		}
-
-		if(unlikely(++i == rx_ring->count)) i = 0;
+		if (unlikely(++i == rx_ring->count)) i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 		ps_page = &rx_ring->ps_page[i];
 		ps_page_dma = &rx_ring->ps_page_dma[i];
 	}
 
 no_buffers:
-	rx_ring->next_to_use = i;
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		/* Hardware increments by 16 bytes, but packet split
+		 * descriptors are 32 bytes...so we increment tail
+		 * twice as much.
+		 */
+		writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
+	}
 }
 
 /**
@@ -4061,24 +4158,24 @@ e1000_smartspeed(struct e1000_adapter *adapter)
 	uint16_t phy_status;
 	uint16_t phy_ctrl;
 
-	if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
+	if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
 	   !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
 		return;
 
-	if(adapter->smartspeed == 0) {
+	if (adapter->smartspeed == 0) {
 		/* If Master/Slave config fault is asserted twice,
 		 * we assume back-to-back */
 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
-		if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
-		if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
-		if(phy_ctrl & CR_1000T_MS_ENABLE) {
+		if (phy_ctrl & CR_1000T_MS_ENABLE) {
 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
 			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
 					    phy_ctrl);
 			adapter->smartspeed++;
-			if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
 			   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
 				   	       &phy_ctrl)) {
 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
@@ -4088,12 +4185,12 @@ e1000_smartspeed(struct e1000_adapter *adapter)
 			}
 		}
 		return;
-	} else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
 		/* If still no link, perhaps using 2/3 pair cable */
 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
 		phy_ctrl |= CR_1000T_MS_ENABLE;
 		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
-		if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
 		   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
 				     MII_CR_RESTART_AUTO_NEG);
@@ -4101,7 +4198,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
 		}
 	}
 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
-	if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
 		adapter->smartspeed = 0;
 }
 
@@ -4142,7 +4239,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	uint16_t spddplx;
 	unsigned long flags;
 
-	if(adapter->hw.media_type != e1000_media_type_copper)
+	if (adapter->hw.media_type != e1000_media_type_copper)
 		return -EOPNOTSUPP;
 
 	switch (cmd) {
@@ -4150,10 +4247,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 		data->phy_id = adapter->hw.phy_addr;
 		break;
 	case SIOCGMIIREG:
-		if(!capable(CAP_NET_ADMIN))
+		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 		spin_lock_irqsave(&adapter->stats_lock, flags);
-		if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+		if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
 				   &data->val_out)) {
 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
 			return -EIO;
@@ -4161,23 +4258,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
 		break;
 	case SIOCSMIIREG:
-		if(!capable(CAP_NET_ADMIN))
+		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
-		if(data->reg_num & ~(0x1F))
+		if (data->reg_num & ~(0x1F))
 			return -EFAULT;
 		mii_reg = data->val_in;
 		spin_lock_irqsave(&adapter->stats_lock, flags);
-		if(e1000_write_phy_reg(&adapter->hw, data->reg_num,
+		if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
 					mii_reg)) {
 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
 			return -EIO;
 		}
-		if(adapter->hw.phy_type == e1000_phy_m88) {
+		if (adapter->hw.phy_type == e1000_phy_m88) {
 			switch (data->reg_num) {
 			case PHY_CTRL:
-				if(mii_reg & MII_CR_POWER_DOWN)
+				if (mii_reg & MII_CR_POWER_DOWN)
 					break;
-				if(mii_reg & MII_CR_AUTO_NEG_EN) {
+				if (mii_reg & MII_CR_AUTO_NEG_EN) {
 					adapter->hw.autoneg = 1;
 					adapter->hw.autoneg_advertised = 0x2F;
 				} else {
@@ -4192,14 +4289,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 						   HALF_DUPLEX;
 					retval = e1000_set_spd_dplx(adapter,
 								    spddplx);
-					if(retval) {
+					if (retval) {
 						spin_unlock_irqrestore(
-							&adapter->stats_lock, 
+							&adapter->stats_lock,
 							flags);
 						return retval;
 					}
 				}
-				if(netif_running(adapter->netdev)) {
+				if (netif_running(adapter->netdev)) {
 					e1000_down(adapter);
 					e1000_up(adapter);
 				} else
@@ -4207,7 +4304,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 				break;
 			case M88E1000_PHY_SPEC_CTRL:
 			case M88E1000_EXT_PHY_SPEC_CTRL:
-				if(e1000_phy_reset(&adapter->hw)) {
+				if (e1000_phy_reset(&adapter->hw)) {
 					spin_unlock_irqrestore(
 						&adapter->stats_lock, flags);
 					return -EIO;
@@ -4217,9 +4314,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 		} else {
 			switch (data->reg_num) {
 			case PHY_CTRL:
-				if(mii_reg & MII_CR_POWER_DOWN)
+				if (mii_reg & MII_CR_POWER_DOWN)
 					break;
-				if(netif_running(adapter->netdev)) {
+				if (netif_running(adapter->netdev)) {
 					e1000_down(adapter);
 					e1000_up(adapter);
 				} else
@@ -4241,7 +4338,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
 	struct e1000_adapter *adapter = hw->back;
 	int ret_val = pci_set_mwi(adapter->pdev);
 
-	if(ret_val)
+	if (ret_val)
 		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
 }
 
@@ -4290,7 +4387,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 	e1000_irq_disable(adapter);
 	adapter->vlgrp = grp;
 
-	if(grp) {
+	if (grp) {
 		/* enable VLAN tag insert/strip */
 		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
 		ctrl |= E1000_CTRL_VME;
@@ -4312,7 +4409,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		rctl = E1000_READ_REG(&adapter->hw, RCTL);
 		rctl &= ~E1000_RCTL_VFE;
 		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-		if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
+		if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
 			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 		}
@@ -4326,9 +4423,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	uint32_t vfta, index;
-	if((adapter->hw.mng_cookie.status &
-		E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
-		(vid == adapter->mng_vlan_id))
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	    (vid == adapter->mng_vlan_id))
 		return;
 	/* add VID to filter table */
 	index = (vid >> 5) & 0x7F;
@@ -4345,13 +4443,13 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 
 	e1000_irq_disable(adapter);
 
-	if(adapter->vlgrp)
+	if (adapter->vlgrp)
 		adapter->vlgrp->vlan_devices[vid] = NULL;
 
 	e1000_irq_enable(adapter);
 
-	if((adapter->hw.mng_cookie.status &
-		E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
 	    (vid == adapter->mng_vlan_id)) {
 		/* release control to f/w */
 		e1000_release_hw_control(adapter);
@@ -4370,10 +4468,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
 {
 	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
 
-	if(adapter->vlgrp) {
+	if (adapter->vlgrp) {
 		uint16_t vid;
-		for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-			if(!adapter->vlgrp->vlan_devices[vid])
+		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+			if (!adapter->vlgrp->vlan_devices[vid])
 				continue;
 			e1000_vlan_rx_add_vid(adapter->netdev, vid);
 		}
@@ -4386,13 +4484,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
 	adapter->hw.autoneg = 0;
 
 	/* Fiber NICs only allow 1000 gbps Full duplex */
-	if((adapter->hw.media_type == e1000_media_type_fiber) &&
+	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
 		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
 		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 
-	switch(spddplx) {
+	switch (spddplx) {
 	case SPEED_10 + DUPLEX_HALF:
 		adapter->hw.forced_speed_duplex = e1000_10_half;
 		break;
@@ -4418,6 +4516,54 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
 }
 
 #ifdef CONFIG_PM
+/* these functions save and restore 16 or 64 dwords (64-256 bytes) of config
+ * space versus the 64 bytes that pci_[save|restore]_state handle
+ */
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+static int
+e1000_pci_save_state(struct e1000_adapter *adapter)
+{
+	struct pci_dev *dev = adapter->pdev;
+	int size;
+	int i;
+	if (adapter->hw.mac_type >= e1000_82571)
+		size = PCIE_CONFIG_SPACE_LEN;
+	else
+		size = PCI_CONFIG_SPACE_LEN;
+
+	WARN_ON(adapter->config_space != NULL);
+
+	adapter->config_space = kmalloc(size, GFP_KERNEL);
+	if (!adapter->config_space) {
+		DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
+		return -ENOMEM;
+	}
+	for (i = 0; i < (size / 4); i++)
+		pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
+	return 0;
+}
+
+static void
+e1000_pci_restore_state(struct e1000_adapter *adapter)
+{
+	struct pci_dev *dev = adapter->pdev;
+	int size;
+	int i;
+	if (adapter->config_space == NULL)
+		return;
+	if (adapter->hw.mac_type >= e1000_82571)
+		size = PCIE_CONFIG_SPACE_LEN;
+	else
+		size = PCI_CONFIG_SPACE_LEN;
+	for (i = 0; i < (size / 4); i++)
+		pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
+	kfree(adapter->config_space);
+	adapter->config_space = NULL;
+	return;
+}
+#endif /* CONFIG_PM */
+
 static int
 e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 {
@@ -4429,25 +4575,33 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	netif_device_detach(netdev);
 
-	if(netif_running(netdev))
+	if (netif_running(netdev))
 		e1000_down(adapter);
 
+#ifdef CONFIG_PM
+	/* implement our own version of pci_save_state(pdev) because pci 
+	 * express adapters have larger 256 byte config spaces */
+	retval = e1000_pci_save_state(adapter);
+	if (retval)
+		return retval;
+#endif
+
 	status = E1000_READ_REG(&adapter->hw, STATUS);
-	if(status & E1000_STATUS_LU)
+	if (status & E1000_STATUS_LU)
 		wufc &= ~E1000_WUFC_LNKC;
 
-	if(wufc) {
+	if (wufc) {
 		e1000_setup_rctl(adapter);
 		e1000_set_multi(netdev);
 
 		/* turn on all-multi mode if wake on multicast is enabled */
-		if(adapter->wol & E1000_WUFC_MC) {
+		if (adapter->wol & E1000_WUFC_MC) {
 			rctl = E1000_READ_REG(&adapter->hw, RCTL);
 			rctl |= E1000_RCTL_MPE;
 			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
 		}
 
-		if(adapter->hw.mac_type >= e1000_82540) {
+		if (adapter->hw.mac_type >= e1000_82540) {
 			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
 			/* advertise wake from D3Cold */
 			#define E1000_CTRL_ADVD3WUC 0x00100000
@@ -4458,7 +4612,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 			E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 		}
 
-		if(adapter->hw.media_type == e1000_media_type_fiber ||
+		if (adapter->hw.media_type == e1000_media_type_fiber ||
 		   adapter->hw.media_type == e1000_media_type_internal_serdes) {
 			/* keep the laser running in D3 */
 			ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
@@ -4488,12 +4642,10 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 			DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
 	}
 
-	pci_save_state(pdev);
-
-	if(adapter->hw.mac_type >= e1000_82540 &&
+	if (adapter->hw.mac_type >= e1000_82540 &&
 	   adapter->hw.media_type == e1000_media_type_copper) {
 		manc = E1000_READ_REG(&adapter->hw, MANC);
-		if(manc & E1000_MANC_SMBUS_EN) {
+		if (manc & E1000_MANC_SMBUS_EN) {
 			manc |= E1000_MANC_ARP_EN;
 			E1000_WRITE_REG(&adapter->hw, MANC, manc);
 			retval = pci_enable_wake(pdev, PCI_D3hot, 1);
@@ -4518,6 +4670,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 	return 0;
 }
 
+#ifdef CONFIG_PM
 static int
 e1000_resume(struct pci_dev *pdev)
 {
@@ -4529,6 +4682,7 @@ e1000_resume(struct pci_dev *pdev)
 	retval = pci_set_power_state(pdev, PCI_D0);
 	if (retval)
 		DPRINTK(PROBE, ERR, "Error in setting power state\n");
+	e1000_pci_restore_state(adapter);
 	ret_val = pci_enable_device(pdev);
 	pci_set_master(pdev);
 
@@ -4542,12 +4696,12 @@ e1000_resume(struct pci_dev *pdev)
 	e1000_reset(adapter);
 	E1000_WRITE_REG(&adapter->hw, WUS, ~0);
 
-	if(netif_running(netdev))
+	if (netif_running(netdev))
 		e1000_up(adapter);
 
 	netif_device_attach(netdev);
 
-	if(adapter->hw.mac_type >= e1000_82540 &&
+	if (adapter->hw.mac_type >= e1000_82540 &&
 	   adapter->hw.media_type == e1000_media_type_copper) {
 		manc = E1000_READ_REG(&adapter->hw, MANC);
 		manc &= ~(E1000_MANC_ARP_EN);
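
The new e1000_pci_save_state()/e1000_pci_restore_state() helpers above exist because PCI Express parts (mac_type >= e1000_82571) expose 256 bytes of configuration space, while pci_save_state()/pci_restore_state() at this point only covered the first 64 bytes, so the driver copies the space out dword by dword itself. A minimal userspace sketch of that dword-loop pattern, operating on a stand-in byte array rather than real config space (the buffer, sizes and function names here are illustrative, not the driver's API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PCI_CONFIG_SPACE_LEN	64	/* conventional PCI config space */
#define PCIE_CONFIG_SPACE_LEN	256	/* PCI Express extended config space */

/* Stand-in for the device: a flat byte array of "config space". */
static uint8_t fake_config[PCIE_CONFIG_SPACE_LEN];

static void read_config_dword(unsigned int off, uint32_t *val)
{
	memcpy(val, fake_config + off, sizeof(*val));
}

static void write_config_dword(unsigned int off, uint32_t val)
{
	memcpy(fake_config + off, &val, sizeof(val));
}

/* Save: allocate size/4 dwords and copy them out, as the driver does. */
static uint32_t *save_state(int is_pcie, int *size)
{
	uint32_t *buf;
	int i;

	*size = is_pcie ? PCIE_CONFIG_SPACE_LEN : PCI_CONFIG_SPACE_LEN;
	buf = malloc(*size);
	if (!buf)
		return NULL;
	for (i = 0; i < *size / 4; i++)
		read_config_dword(i * 4, &buf[i]);
	return buf;
}

static void restore_state(uint32_t *buf, int size)
{
	int i;

	for (i = 0; i < size / 4; i++)
		write_config_dword(i * 4, buf[i]);
	free(buf);
}

int main(void)
{
	uint32_t *saved;
	int size;

	memset(fake_config, 0xab, sizeof(fake_config));
	saved = save_state(1, &size);			/* pretend: PCIe part */
	memset(fake_config, 0, sizeof(fake_config));	/* "power loss" */
	restore_state(saved, size);
	printf("restored byte 0x80 = 0x%02x (%d bytes handled)\n",
	       fake_config[0x80], size);
	return 0;
}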

+ 1 - 1
drivers/net/e1000/e1000_osdep.h

@@ -47,7 +47,7 @@
 	                	BUG(); \
 			} else { \
 				msleep(x); \
-			} } while(0)
+			} } while (0)
 
 /* Some workarounds require millisecond delays and are run during interrupt
  * context.  Most notably, when establishing link, the phy may need tweaking

+ 22 - 22
drivers/net/e1000/e1000_param.c

@@ -227,7 +227,7 @@ static int __devinit
 e1000_validate_option(int *value, struct e1000_option *opt,
 		struct e1000_adapter *adapter)
 {
-	if(*value == OPTION_UNSET) {
+	if (*value == OPTION_UNSET) {
 		*value = opt->def;
 		return 0;
 	}
@@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
 		}
 		break;
 	case range_option:
-		if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
 			DPRINTK(PROBE, INFO,
 					"%s set to %i\n", opt->name, *value);
 			return 0;
@@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt,
 		int i;
 		struct e1000_opt_list *ent;
 
-		for(i = 0; i < opt->arg.l.nr; i++) {
+		for (i = 0; i < opt->arg.l.nr; i++) {
 			ent = &opt->arg.l.p[i];
-			if(*value == ent->i) {
-				if(ent->str[0] != '\0')
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
 					DPRINTK(PROBE, INFO, "%s\n", ent->str);
 				return 0;
 			}
@@ -291,7 +291,7 @@ void __devinit
 e1000_check_options(struct e1000_adapter *adapter)
 {
 	int bd = adapter->bd_number;
-	if(bd >= E1000_MAX_NIC) {
+	if (bd >= E1000_MAX_NIC) {
 		DPRINTK(PROBE, NOTICE,
 		       "Warning: no configuration for board #%i\n", bd);
 		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
@@ -315,7 +315,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 		if (num_TxDescriptors > bd) {
 			tx_ring->count = TxDescriptors[bd];
 			e1000_validate_option(&tx_ring->count, &opt, adapter);
-			E1000_ROUNDUP(tx_ring->count, 
+			E1000_ROUNDUP(tx_ring->count,
 						REQ_TX_DESCRIPTOR_MULTIPLE);
 		} else {
 			tx_ring->count = opt.def;
@@ -341,7 +341,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 		if (num_RxDescriptors > bd) {
 			rx_ring->count = RxDescriptors[bd];
 			e1000_validate_option(&rx_ring->count, &opt, adapter);
-			E1000_ROUNDUP(rx_ring->count, 
+			E1000_ROUNDUP(rx_ring->count,
 						REQ_RX_DESCRIPTOR_MULTIPLE);
 		} else {
 			rx_ring->count = opt.def;
@@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 
 		if (num_TxIntDelay > bd) {
 			adapter->tx_int_delay = TxIntDelay[bd];
-			e1000_validate_option(&adapter->tx_int_delay, &opt, 
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
 								adapter);
 		} else {
 			adapter->tx_int_delay = opt.def;
@@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 
 		if (num_TxAbsIntDelay > bd) {
 			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
-			e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
 								adapter);
 		} else {
 			adapter->tx_abs_int_delay = opt.def;
@@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 
 		if (num_RxIntDelay > bd) {
 			adapter->rx_int_delay = RxIntDelay[bd];
-			e1000_validate_option(&adapter->rx_int_delay, &opt, 
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
 								adapter);
 		} else {
 			adapter->rx_int_delay = opt.def;
@@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 
 		if (num_RxAbsIntDelay > bd) {
 			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
-			e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
 								adapter);
 		} else {
 			adapter->rx_abs_int_delay = opt.def;
@@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter)
 
 		if (num_InterruptThrottleRate > bd) {
 			adapter->itr = InterruptThrottleRate[bd];
-			switch(adapter->itr) {
+			switch (adapter->itr) {
 			case 0:
-				DPRINTK(PROBE, INFO, "%s turned off\n", 
+				DPRINTK(PROBE, INFO, "%s turned off\n",
 					opt.name);
 				break;
 			case 1:
-				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 
+				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
 					opt.name);
 				break;
 			default:
-				e1000_validate_option(&adapter->itr, &opt, 
+				e1000_validate_option(&adapter->itr, &opt,
 					adapter);
 				break;
 			}
@@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 
-	switch(adapter->hw.media_type) {
+	switch (adapter->hw.media_type) {
 	case e1000_media_type_fiber:
 	case e1000_media_type_internal_serdes:
 		e1000_check_fiber_options(adapter);
@@ -518,17 +518,17 @@ static void __devinit
 e1000_check_fiber_options(struct e1000_adapter *adapter)
 {
 	int bd = adapter->bd_number;
-	if(num_Speed > bd) {
+	if (num_Speed > bd) {
 		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
 		       "parameter ignored\n");
 	}
 
-	if(num_Duplex > bd) {
+	if (num_Duplex > bd) {
 		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
 		       "parameter ignored\n");
 	}
 
-	if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+	if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
 		DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
 				 "not valid for fiber adapters, "
 				 "parameter ignored\n");
@@ -598,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
 		}
 	}
 
-	if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+	if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
 		DPRINTK(PROBE, INFO,
 		       "AutoNeg specified along with Speed or Duplex, "
 		       "parameter ignored\n");
@@ -659,7 +659,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
 	switch (speed + dplx) {
 	case 0:
 		adapter->hw.autoneg = adapter->fc_autoneg = 1;
-		if((num_Speed > bd) && (speed != 0 || dplx != 0))
+		if ((num_Speed > bd) && (speed != 0 || dplx != 0))
 			DPRINTK(PROBE, INFO,
 			       "Speed and duplex autonegotiation enabled\n");
 		break;
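
Besides the whitespace cleanup, the option-handling path above validates each TxDescriptors/RxDescriptors module option and then rounds the count up to the controller's required multiple (E1000_ROUNDUP to REQ_TX_DESCRIPTOR_MULTIPLE / REQ_RX_DESCRIPTOR_MULTIPLE). A small sketch of that round-up step; the multiple of 8 and the helper name are illustrative, not taken from the driver:

#include <stdio.h>

/* Round 'count' up to the next multiple of 'mult' (mult > 0). */
static unsigned int roundup_multiple(unsigned int count, unsigned int mult)
{
	return ((count + mult - 1) / mult) * mult;
}

int main(void)
{
	unsigned int requested = 250;	/* e.g. a TxDescriptors= option value */
	unsigned int granted = roundup_multiple(requested, 8);

	printf("requested %u descriptors, using %u\n", requested, granted);
	return 0;
}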

+ 54 - 28
drivers/net/tg3.c

@@ -69,8 +69,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.47"
-#define DRV_MODULE_RELDATE	"Dec 28, 2005"
+#define DRV_MODULE_VERSION	"3.48"
+#define DRV_MODULE_RELDATE	"Jan 16, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -1325,10 +1325,12 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
 		tw32(0x7d00, val);
 		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
-			tg3_nvram_lock(tp);
+			int err;
+
+			err = tg3_nvram_lock(tp);
 			tg3_halt_cpu(tp, RX_CPU_BASE);
-			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0);
-			tg3_nvram_unlock(tp);
+			if (!err)
+				tg3_nvram_unlock(tp);
 		}
 	}
 
@@ -4193,14 +4195,19 @@ static int tg3_nvram_lock(struct tg3 *tp)
 	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
 		int i;
 
-		tw32(NVRAM_SWARB, SWARB_REQ_SET1);
-		for (i = 0; i < 8000; i++) {
-			if (tr32(NVRAM_SWARB) & SWARB_GNT1)
-				break;
-			udelay(20);
+		if (tp->nvram_lock_cnt == 0) {
+			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+			for (i = 0; i < 8000; i++) {
+				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+					break;
+				udelay(20);
+			}
+			if (i == 8000) {
+				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
+				return -ENODEV;
+			}
 		}
-		if (i == 8000)
-			return -ENODEV;
+		tp->nvram_lock_cnt++;
 	}
 	return 0;
 }
@@ -4208,8 +4215,12 @@ static int tg3_nvram_lock(struct tg3 *tp)
 /* tp->lock is held. */
 static void tg3_nvram_unlock(struct tg3 *tp)
 {
-	if (tp->tg3_flags & TG3_FLAG_NVRAM)
-		tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
+		if (tp->nvram_lock_cnt > 0)
+			tp->nvram_lock_cnt--;
+		if (tp->nvram_lock_cnt == 0)
+			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+	}
 }
 
 /* tp->lock is held. */
@@ -4320,8 +4331,13 @@ static int tg3_chip_reset(struct tg3 *tp)
 	void (*write_op)(struct tg3 *, u32, u32);
 	int i;
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
 		tg3_nvram_lock(tp);
+		/* No matching tg3_nvram_unlock() after this because
+		 * chip reset below will undo the nvram lock.
+		 */
+		tp->nvram_lock_cnt = 0;
+	}
 
 	/*
 	 * We must avoid the readl() that normally takes place.
@@ -4717,6 +4733,10 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
 		       (offset == RX_CPU_BASE ? "RX" : "TX"));
 		return -ENODEV;
 	}
+
+	/* Clear firmware's nvram arbitration. */
+	if (tp->tg3_flags & TG3_FLAG_NVRAM)
+		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
 	return 0;
 }
 
@@ -4736,7 +4756,7 @@ struct fw_info {
 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
 				 int cpu_scratch_size, struct fw_info *info)
 {
-	int err, i;
+	int err, lock_err, i;
 	void (*write_op)(struct tg3 *, u32, u32);
 
 	if (cpu_base == TX_CPU_BASE &&
@@ -4755,9 +4775,10 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
 	/* It is possible that bootcode is still loading at this point.
 	 * Get the nvram lock first before halting the cpu.
 	 */
-	tg3_nvram_lock(tp);
+	lock_err = tg3_nvram_lock(tp);
 	err = tg3_halt_cpu(tp, cpu_base);
-	tg3_nvram_unlock(tp);
+	if (!lock_err)
+		tg3_nvram_unlock(tp);
 	if (err)
 		goto out;
 
@@ -8182,7 +8203,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 		data[1] = 1;
 	}
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
-		int irq_sync = 0;
+		int err, irq_sync = 0;
 
 		if (netif_running(dev)) {
 			tg3_netif_stop(tp);
@@ -8192,11 +8213,12 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 		tg3_full_lock(tp, irq_sync);
 
 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
-		tg3_nvram_lock(tp);
+		err = tg3_nvram_lock(tp);
 		tg3_halt_cpu(tp, RX_CPU_BASE);
 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 			tg3_halt_cpu(tp, TX_CPU_BASE);
-		tg3_nvram_unlock(tp);
+		if (!err)
+			tg3_nvram_unlock(tp);
 
 		if (tg3_test_registers(tp) != 0) {
 			etest->flags |= ETH_TEST_FL_FAILED;
@@ -8588,7 +8610,11 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
 		tp->tg3_flags |= TG3_FLAG_NVRAM;
 
-		tg3_nvram_lock(tp);
+		if (tg3_nvram_lock(tp)) {
+			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
+			       "tg3_nvram_init failed.\n", tp->dev->name);
+			return;
+		}
 		tg3_enable_nvram_access(tp);
 
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
@@ -8686,7 +8712,9 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
 	if (offset > NVRAM_ADDR_MSK)
 		return -EINVAL;
 
-	tg3_nvram_lock(tp);
+	ret = tg3_nvram_lock(tp);
+	if (ret)
+		return ret;
 
 	tg3_enable_nvram_access(tp);
 
@@ -8785,10 +8813,6 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
 
 		offset = offset + (pagesize - page_off);
 
-		/* Nvram lock released by tg3_nvram_read() above,
-		 * so need to get it again.
-		 */
-		tg3_nvram_lock(tp);
 		tg3_enable_nvram_access(tp);
 
 		/*
@@ -8925,7 +8949,9 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 	else {
 		u32 grc_mode;
 
-		tg3_nvram_lock(tp);
+		ret = tg3_nvram_lock(tp);
+		if (ret)
+			return ret;
 
 		tg3_enable_nvram_access(tp);
 		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
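
The tg3 changes turn the NVRAM software-arbitration lock into a depth-counted lock: nvram_lock_cnt is bumped on every tg3_nvram_lock(), the SWARB request is only issued on the outermost acquire, tg3_nvram_unlock() only drops the hardware grant once the count returns to zero, and callers now skip the unlock when their own lock attempt failed. A toy sketch of that counted acquire/release pattern, with the hardware access replaced by printouts and all names illustrative:

#include <stdio.h>

struct nvram_lock {
	int cnt;	/* how many times the current owner holds the lock */
};

static int nvram_lock(struct nvram_lock *l)
{
	if (l->cnt == 0) {
		/* only the outermost acquire touches the hardware */
		printf("request NVRAM arbitration\n");
		/* a real driver polls for the grant here and may fail */
	}
	l->cnt++;
	return 0;
}

static void nvram_unlock(struct nvram_lock *l)
{
	if (l->cnt > 0)
		l->cnt--;
	if (l->cnt == 0)
		printf("release NVRAM arbitration\n");
}

int main(void)
{
	struct nvram_lock l = { 0 };

	nvram_lock(&l);		/* e.g. an NVRAM read path */
	nvram_lock(&l);		/* nested user: no second hardware request */
	nvram_unlock(&l);
	nvram_unlock(&l);	/* last unlock releases the arbitration */
	return 0;
}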

+ 1 - 0
drivers/net/tg3.h

@@ -2275,6 +2275,7 @@ struct tg3 {
 	dma_addr_t			stats_mapping;
 	struct work_struct		reset_task;
 
+	int				nvram_lock_cnt;
 	u32				nvram_size;
 	u32				nvram_pagesize;
 	u32				nvram_jedecnum;

+ 4 - 1
drivers/pci/quirks.c

@@ -1142,6 +1142,9 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
 	case 0x27c4:
 		ich = 7;
 		break;
+	case 0x2828:	/* ICH8M */
+		ich = 8;
+		break;
 	default:
 		/* we do not handle this PCI device */
 		return;
@@ -1161,7 +1164,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
 		else
 			return;			/* not in combined mode */
 	} else {
-		WARN_ON((ich != 6) && (ich != 7));
+		WARN_ON((ich != 6) && (ich != 7) && (ich != 8));
 		tmp &= 0x3;  /* interesting bits 1:0 */
 		if (tmp & (1 << 0))
 			comb = (1 << 2);	/* PATA port 0, SATA port 1 */

+ 10 - 0
drivers/scsi/ahci.c

@@ -276,6 +276,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  board_ahci }, /* ESB2 */
 	{ PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ICH7-M DH */
+	{ PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8 */
+	{ PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8 */
+	{ PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8 */
+	{ PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8M */
+	{ PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* ICH8M */
 	{ }	/* terminate list */
 };
 

+ 3 - 0
drivers/scsi/ata_piix.c

@@ -157,6 +157,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
 	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
 	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
 
 	{ }	/* terminate list */
 };

+ 63 - 10
drivers/scsi/libata-core.c

@@ -611,6 +611,10 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
 	if (dev->flags & ATA_DFLAG_PIO) {
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
+	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
+		/* Unable to use DMA due to host limitation */
+		tf->protocol = ATA_PROT_PIO;
+		index = dev->multi_count ? 0 : 4;
 	} else {
 		tf->protocol = ATA_PROT_DMA;
 		index = 16;
@@ -1051,18 +1055,22 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
 {
 	u16 modes;
 
-	/* Usual case. Word 53 indicates word 88 is valid */
-	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+	/* Usual case. Word 53 indicates word 64 is valid */
+	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
 		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
 		modes <<= 3;
 		modes |= 0x7;
 		return modes;
 	}
 
-	/* If word 88 isn't valid then Word 51 holds the PIO timing number
-	   for the maximum. Turn it into a mask and return it */
-	modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+	/* If word 64 isn't valid then Word 51 high byte holds the PIO timing
+	   number for the maximum. Turn it into a mask and return it */
+	modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ;
 	return modes;
+	/* But wait.. there's more. Design your standards by committee and
+	   you too can get a free iordy field to process. However its the 
+	   speeds not the modes that are supported... Note drivers using the
+	   timing API will get this right anyway */
 }
 
 struct ata_exec_internal_arg {
@@ -1164,6 +1172,39 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 	return AC_ERR_OTHER;
 }
 
+/**
+ *	ata_pio_need_iordy	-	check if iordy needed
+ *	@adev: ATA device
+ *
+ *	Check if the current speed of the device requires IORDY. Used
+ *	by various controllers for chip configuration.
+ */
+
+unsigned int ata_pio_need_iordy(const struct ata_device *adev)
+{
+	int pio;
+	int speed = adev->pio_mode - XFER_PIO_0;
+
+	if (speed < 2)
+		return 0;
+	if (speed > 2)
+		return 1;
+		
+	/* If we have no drive specific rule, then PIO 2 is non IORDY */
+
+	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
+		pio = adev->id[ATA_ID_EIDE_PIO];
+		/* Is the speed faster than the drive allows non IORDY ? */
+		if (pio) {
+			/* This is cycle times not frequency - watch the logic! */
+			if (pio > 240)	/* PIO2 is 240nS per cycle */
+				return 1;
+			return 0;
+		}
+	}
+	return 0;
+}
+
 /**
  *	ata_dev_identify - obtain IDENTIFY x DEVICE page
  *	@ap: port on which device we wish to probe resides
@@ -1415,7 +1456,7 @@ void ata_dev_config(struct ata_port *ap, unsigned int i)
 		ap->udma_mask &= ATA_UDMA5;
 		ap->host->max_sectors = ATA_MAX_SECTORS;
 		ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
-		ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
+		ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
 	}
 
 	if (ap->ops->dev_config)
@@ -3056,10 +3097,21 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
 			  unsigned int buflen, int do_write)
 {
-	if (ap->flags & ATA_FLAG_MMIO)
-		ata_mmio_data_xfer(ap, buf, buflen, do_write);
-	else
-		ata_pio_data_xfer(ap, buf, buflen, do_write);
+	/* Make the crap hardware pay the costs not the good stuff */
+	if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
+		unsigned long flags;
+		local_irq_save(flags);
+		if (ap->flags & ATA_FLAG_MMIO)
+			ata_mmio_data_xfer(ap, buf, buflen, do_write);
+		else
+			ata_pio_data_xfer(ap, buf, buflen, do_write);
+		local_irq_restore(flags);
+	} else {
+		if (ap->flags & ATA_FLAG_MMIO)
+			ata_mmio_data_xfer(ap, buf, buflen, do_write);
+		else
+			ata_pio_data_xfer(ap, buf, buflen, do_write);
+	}
 }
 
 /**
@@ -5122,6 +5174,7 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string);
 EXPORT_SYMBOL_GPL(ata_dev_config);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
+EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
 EXPORT_SYMBOL_GPL(ata_timing_compute);
 EXPORT_SYMBOL_GPL(ata_timing_merge);
 

+ 16 - 0
drivers/scsi/sata_promise.c

@@ -66,6 +66,7 @@ enum {
 	board_2037x		= 0,	/* FastTrak S150 TX2plus */
 	board_20319		= 1,	/* FastTrak S150 TX4 */
 	board_20619		= 2,	/* FastTrak TX4000 */
+	board_20771		= 3,	/* FastTrak TX2300 */
 
 	PDC_HAS_PATA		= (1 << 1), /* PDC20375 has PATA */
 
@@ -190,6 +191,16 @@ static const struct ata_port_info pdc_port_info[] = {
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &pdc_pata_ops,
 	},
+
+	/* board_20771 */
+	{
+		.sht		= &pdc_ata_sht,
+		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &pdc_sata_ops,
+	},
 };
 
 static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -226,6 +237,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
 	{ PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_20619 },
 
+	{ PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_20771 },
 	{ }	/* terminate list */
 };
 
@@ -706,6 +719,9 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	case board_2037x:
 		probe_ent->n_ports = 2;
 		break;
+	case board_20771:
+		probe_ent->n_ports = 2;
+		break;
 	case board_20619:
 		probe_ent->n_ports = 4;
 

+ 1 - 0
drivers/scsi/sata_svw.c

@@ -470,6 +470,7 @@ static const struct pci_device_id k2_sata_pci_tbl[] = {
 	{ 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 	{ 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
 	{ 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+	{ 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 	{ }
 };
 

+ 3 - 6
drivers/video/sbuslib.c

@@ -199,8 +199,7 @@ struct  fbcmap32 {
 #define FBIOPUTCMAP32	_IOW('F', 3, struct fbcmap32)
 #define FBIOGETCMAP32	_IOW('F', 4, struct fbcmap32)
 
-static int fbiogetputcmap(struct file *file, struct fb_info *info,
-		unsigned int cmd, unsigned long arg)
+static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
 	struct fbcmap32 __user *argp = (void __user *)arg;
 	struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
@@ -236,8 +235,7 @@ struct fbcursor32 {
 #define FBIOSCURSOR32	_IOW('F', 24, struct fbcursor32)
 #define FBIOGCURSOR32	_IOW('F', 25, struct fbcursor32)
 
-static int fbiogscursor(struct file *file, struct fb_info *info,
-		unsigned long arg)
+static int fbiogscursor(struct fb_info *info, unsigned long arg)
 {
 	struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
 	struct fbcursor32 __user *argp =  (void __user *)arg;
@@ -263,8 +261,7 @@ static int fbiogscursor(struct file *file, struct fb_info *info,
 	return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
 }
 
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
-		unsigned long arg)
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
 	case FBIOGTYPE:

+ 1 - 1
drivers/video/sbuslib.h

@@ -20,7 +20,7 @@ extern int sbusfb_mmap_helper(struct sbus_mmap_map *map,
 int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
 			struct fb_info *info,
 			int type, int fb_depth, unsigned long fb_size);
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
 		unsigned long arg);
 
 #endif /* _SBUSLIB_H */

+ 26 - 3
fs/xfs/linux-2.6/xfs_aops.c

@@ -336,24 +336,47 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
 }
 
 /*
- * Submit all of the bios for all of the ioends we have saved up,
- * covering the initial writepage page and also any probed pages.
+ * Submit all of the bios for all of the ioends we have saved up, covering the
+ * initial writepage page and also any probed pages.
+ *
+ * Because we may have multiple ioends spanning a page, we need to start
+ * writeback on all the buffers before we submit them for I/O. If we mark the
+ * buffers as we got, then we can end up with a page that only has buffers
+ * marked async write and I/O complete on can occur before we mark the other
+ * buffers async write.
+ *
+ * The end result of this is that we trip a bug in end_page_writeback() because
+ * we call it twice for the one page as the code in end_buffer_async_write()
+ * assumes that all buffers on the page are started at the same time.
+ *
+ * The fix is two passes across the ioend list - one to start writeback on the
+ * bufferheads, and then the second one submit them for I/O.
  */
 STATIC void
 xfs_submit_ioend(
 	xfs_ioend_t		*ioend)
 {
+	xfs_ioend_t		*head = ioend;
 	xfs_ioend_t		*next;
 	struct buffer_head	*bh;
 	struct bio		*bio;
 	sector_t		lastblock = 0;
 
+	/* Pass 1 - start writeback */
+	do {
+		next = ioend->io_list;
+		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+			xfs_start_buffer_writeback(bh);
+		}
+	} while ((ioend = next) != NULL);
+
+	/* Pass 2 - submit I/O */
+	ioend = head;
 	do {
 		next = ioend->io_list;
 		bio = NULL;
 
 		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
-			xfs_start_buffer_writeback(bh);
 
 			if (!bio) {
  retry:
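
The comment block added to xfs_submit_ioend() explains the fix: writeback has to be started on every buffer across all the queued ioends before any of them is submitted, otherwise an early I/O completion can trigger end_page_writeback() for a page whose remaining buffers were not yet marked. The shape of the fix is simply two passes over the same list. A toy sketch of that mark-then-act traversal over a singly linked list (the struct and names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct item {
	struct item *next;
	int started;	/* stands in for "writeback started on this buffer" */
};

/* Pass 1: mark every item.  Pass 2: submit them.  Folding both into one
 * pass would let completion of an early item race with items not yet marked. */
static void submit_all(struct item *head)
{
	struct item *it;

	for (it = head; it; it = it->next)	/* pass 1 - start writeback */
		it->started = 1;

	for (it = head; it; it = it->next)	/* pass 2 - submit I/O */
		printf("submit item %p (started=%d)\n", (void *)it, it->started);
}

int main(void)
{
	struct item c = { NULL, 0 }, b = { &c, 0 }, a = { &b, 0 };

	submit_all(&a);
	return 0;
}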

+ 2 - 2
include/asm-powerpc/lppaca.h

@@ -31,7 +31,7 @@
 
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
-struct __attribute__((__aligned__(0x400))) lppaca {
+struct lppaca {
 //=============================================================================
 // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
 // NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -129,7 +129,7 @@ struct __attribute__((__aligned__(0x400))) lppaca {
 // CACHE_LINE_4-5 0x0100 - 0x01FF Contains PMC interrupt data
 //=============================================================================
 	u8	pmc_save_area[256];	// PMC interrupt Area           x00-xFF
-};
+} __attribute__((__aligned__(0x400)));
 
 extern struct lppaca lppaca[];
 

+ 1 - 0
include/linux/kernel.h

@@ -228,6 +228,7 @@ extern void dump_stack(void);
 	ntohs((addr).s6_addr16[6]), \
 	ntohs((addr).s6_addr16[7])
 #define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
+#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"
 
 #if defined(__LITTLE_ENDIAN)
 #define HIPQUAD(addr) \
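
NIP6_SEQFMT prints the same eight 16-bit groups as NIP6_FMT but without the colons, giving a fixed 32-character field; the seq_file users converted later in this change shrink their column widths from %-39s to %-32s to match. A plain printf comparison of the two format strings, using host-order sample groups for an illustrative address:

#include <stdio.h>
#include <stdint.h>

#define NIP6_FMT    "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"

int main(void)
{
	/* eight 16-bit groups of a made-up address, already in host order */
	uint16_t a[8] = { 0xfe80, 0, 0, 0, 0x0211, 0x22ff, 0xfe33, 0x4455 };

	printf(NIP6_FMT "\n",    a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]);
	printf(NIP6_SEQFMT "\n", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]);
	return 0;
}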

+ 8 - 3
include/linux/libata.h

@@ -126,16 +126,19 @@ enum {
 
 	ATA_FLAG_SUSPENDED	= (1 << 12), /* port is suspended */
 
+	ATA_FLAG_PIO_LBA48	= (1 << 13), /* Host DMA engine is LBA28 only */
+	ATA_FLAG_IRQ_MASK	= (1 << 14), /* Mask IRQ in PIO xfers */
+
 	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi lyer */
 	ATA_QCFLAG_SG		= (1 << 3), /* have s/g table? */
 	ATA_QCFLAG_SINGLE	= (1 << 4), /* no s/g, just a single buffer */
 	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
 
 	/* various lengths of time */
-	ATA_TMOUT_EDD		= 5 * HZ,	/* hueristic */
+	ATA_TMOUT_EDD		= 5 * HZ,	/* heuristic */
 	ATA_TMOUT_PIO		= 30 * HZ,
-	ATA_TMOUT_BOOT		= 30 * HZ,	/* hueristic */
-	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* hueristic */
+	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
+	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
 	ATA_TMOUT_CDB		= 30 * HZ,
 	ATA_TMOUT_CDB_QUICK	= 5 * HZ,
 	ATA_TMOUT_INTERNAL	= 30 * HZ,
@@ -499,6 +502,8 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
 /*
  * Timing helpers
  */
+
+extern unsigned int ata_pio_need_iordy(const struct ata_device *);
 extern int ata_timing_compute(struct ata_device *, unsigned short,
 			      struct ata_timing *, int, int);
 extern void ata_timing_merge(const struct ata_timing *,

+ 0 - 9
include/linux/netfilter_ipv6/ip6t_ah.h

@@ -18,13 +18,4 @@ struct ip6t_ah
 #define IP6T_AH_INV_LEN		0x02	/* Invert the sense of length. */
 #define IP6T_AH_INV_MASK	0x03	/* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_AH_H*/

+ 0 - 9
include/linux/netfilter_ipv6/ip6t_esp.h

@@ -7,15 +7,6 @@ struct ip6t_esp
 	u_int8_t  invflags;			/* Inverse flags */
 };
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 /* Values for "invflags" field in struct ip6t_esp. */
 #define IP6T_ESP_INV_SPI		0x01	/* Invert the sense of spi. */
 #define IP6T_ESP_INV_MASK	0x01	/* All possible flags. */

+ 0 - 9
include/linux/netfilter_ipv6/ip6t_frag.h

@@ -21,13 +21,4 @@ struct ip6t_frag
 #define IP6T_FRAG_INV_LEN	0x02	/* Invert the sense of length. */
 #define IP6T_FRAG_INV_MASK	0x03	/* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_FRAG_H*/

+ 0 - 9
include/linux/netfilter_ipv6/ip6t_opts.h

@@ -20,13 +20,4 @@ struct ip6t_opts
 #define IP6T_OPTS_INV_LEN	0x01	/* Invert the sense of length. */
 #define IP6T_OPTS_INV_MASK	0x01	/* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_OPTS_H*/

+ 0 - 9
include/linux/netfilter_ipv6/ip6t_rt.h

@@ -30,13 +30,4 @@ struct ip6t_rt
 #define IP6T_RT_INV_LEN		0x04	/* Invert the sense of length. */
 #define IP6T_RT_INV_MASK	0x07	/* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_RT_H*/

+ 1 - 1
include/linux/skbuff.h

@@ -926,7 +926,7 @@ static inline int skb_tailroom(const struct sk_buff *skb)
  *	Increase the headroom of an empty &sk_buff by reducing the tail
  *	room. This is only allowed for an empty buffer.
  */
-static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+static inline void skb_reserve(struct sk_buff *skb, int len)
 {
 	skb->data += len;
 	skb->tail += len;

+ 3 - 1
net/bridge/netfilter/ebt_ip.c

@@ -92,7 +92,9 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask,
 		if (info->invflags & EBT_IP_PROTO)
 			return -EINVAL;
 		if (info->protocol != IPPROTO_TCP &&
-		    info->protocol != IPPROTO_UDP)
+		    info->protocol != IPPROTO_UDP &&
+		    info->protocol != IPPROTO_SCTP &&
+		    info->protocol != IPPROTO_DCCP)
 			 return -EINVAL;
 	}
 	if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])

+ 3 - 1
net/bridge/netfilter/ebt_log.c

@@ -95,7 +95,9 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
 		       "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr),
 		       NIPQUAD(ih->daddr), ih->tos, ih->protocol);
 		if (ih->protocol == IPPROTO_TCP ||
-		    ih->protocol == IPPROTO_UDP) {
+		    ih->protocol == IPPROTO_UDP ||
+		    ih->protocol == IPPROTO_SCTP ||
+		    ih->protocol == IPPROTO_DCCP) {
 			struct tcpudphdr _ports, *pptr;
 
 			pptr = skb_header_pointer(skb, ih->ihl*4,

+ 6 - 7
net/core/filter.c

@@ -74,7 +74,6 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
  * filtering, filter is the array of filter instructions, and
  * len is the number of filter blocks in the array.
  */
- 
 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 {
 	struct sock_filter *fentry;	/* We walk down these */
@@ -175,7 +174,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			continue;
 		case BPF_LD|BPF_W|BPF_ABS:
 			k = fentry->k;
- load_w:
+load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
 				A = ntohl(*(u32 *)ptr);
@@ -184,7 +183,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 			break;
 		case BPF_LD|BPF_H|BPF_ABS:
 			k = fentry->k;
- load_h:
+load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
 				A = ntohs(*(u16 *)ptr);
@@ -374,7 +373,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 		case BPF_JMP|BPF_JSET|BPF_K:
 		case BPF_JMP|BPF_JSET|BPF_X:
 			/* for conditionals both must be safe */
- 			if (pc + ftest->jt + 1 >= flen ||
+			if (pc + ftest->jt + 1 >= flen ||
 			    pc + ftest->jf + 1 >= flen)
 				return -EINVAL;
 			break;
@@ -384,7 +383,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 		}
 	}
 
-        return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
+	return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
 }
 
 /**
@@ -404,8 +403,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	int err;
 
 	/* Make sure new filter is there and in the right amounts. */
-        if (fprog->filter == NULL)
-                return -EINVAL;
+	if (fprog->filter == NULL)
+		return -EINVAL;
 
 	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
 	if (!fp)

+ 1 - 1
net/core/netpoll.c

@@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np)
 		}
 	}
 
-	if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr)
+	if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
 		memcpy(np->local_mac, ndev->dev_addr, 6);
 
 	if (!np->local_ip) {

+ 11 - 23
net/core/pktgen.c

@@ -139,6 +139,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/wait.h>
+#include <linux/etherdevice.h>
 #include <net/checksum.h>
 #include <net/ipv6.h>
 #include <net/addrconf.h>
@@ -281,8 +282,8 @@ struct pktgen_dev {
         __u32 src_mac_count; /* How many MACs to iterate through */
         __u32 dst_mac_count; /* How many MACs to iterate through */
         
-        unsigned char dst_mac[6];
-        unsigned char src_mac[6];
+        unsigned char dst_mac[ETH_ALEN];
+        unsigned char src_mac[ETH_ALEN];
         
         __u32 cur_dst_mac_offset;
         __u32 cur_src_mac_offset;
@@ -594,16 +595,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 
 	seq_puts(seq, "     src_mac: ");
 
-	if ((pkt_dev->src_mac[0] == 0) && 
-	    (pkt_dev->src_mac[1] == 0) && 
-	    (pkt_dev->src_mac[2] == 0) && 
-	    (pkt_dev->src_mac[3] == 0) && 
-	    (pkt_dev->src_mac[4] == 0) && 
-	    (pkt_dev->src_mac[5] == 0)) 
-
+	if (is_zero_ether_addr(pkt_dev->src_mac))
 		for (i = 0; i < 6; i++) 
 			seq_printf(seq,  "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? "  " : ":");
-
 	else 
 		for (i = 0; i < 6; i++) 
 			seq_printf(seq,  "%02X%s", pkt_dev->src_mac[i], i == 5 ? "  " : ":");
@@ -1189,9 +1183,9 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer
 	}
 	if (!strcmp(name, "dst_mac")) {
 		char *v = valstr;
-                unsigned char old_dmac[6];
+		unsigned char old_dmac[ETH_ALEN];
 		unsigned char *m = pkt_dev->dst_mac;
-                memcpy(old_dmac, pkt_dev->dst_mac, 6);
+		memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
                 
 		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
                 if (len < 0) { return len; }
@@ -1220,8 +1214,8 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer
 		}
 
 		/* Set up Dest MAC */
-                if (memcmp(old_dmac, pkt_dev->dst_mac, 6) != 0) 
-                        memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6);
+		if (compare_ether_addr(old_dmac, pkt_dev->dst_mac))
+			memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
                 
 		sprintf(pg_result, "OK: dstmac");
 		return count;
@@ -1560,17 +1554,11 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
         
         /* Default to the interface's mac if not explicitly set. */
 
-	if ((pkt_dev->src_mac[0] == 0) && 
-	    (pkt_dev->src_mac[1] == 0) && 
-	    (pkt_dev->src_mac[2] == 0) && 
-	    (pkt_dev->src_mac[3] == 0) && 
-	    (pkt_dev->src_mac[4] == 0) && 
-	    (pkt_dev->src_mac[5] == 0)) {
+	if (is_zero_ether_addr(pkt_dev->src_mac))
+	       memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN);
 
-	       memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, 6);
-       }
         /* Set up Dest MAC */
-        memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6);
+	memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
 
         /* Set up pkt size */
         pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
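
The pktgen cleanup replaces the six explicit byte comparisons with is_zero_ether_addr() and the literal 6 with ETH_ALEN when deciding whether to fall back to the interface's own MAC address. A sketch of an equivalent all-zero check and fallback copy; the helper name and addresses are illustrative, not the kernel API:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* True if all six bytes of the MAC are zero, i.e. it was never configured. */
static int zero_ether_addr(const unsigned char *mac)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		if (mac[i])
			return 0;
	return 1;
}

int main(void)
{
	unsigned char unset[ETH_ALEN] = { 0 };
	unsigned char dev_addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char src[ETH_ALEN];

	/* default to the interface's own address when none was given */
	memcpy(src, zero_ether_addr(unset) ? dev_addr : unset, ETH_ALEN);
	printf("src MAC starts with %02x:%02x\n", src[0], src[1]);
	return 0;
}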

+ 1 - 1
net/dccp/ackvec.c

@@ -144,7 +144,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
 						 const unsigned char state)
 {
 	unsigned int gap;
-	signed long new_head;
+	long new_head;
 
 	if (av->dccpav_vec_len + packets > av->dccpav_buf_len)
 		return -ENOBUFS;

+ 0 - 1
net/ipv4/netfilter/Makefile

@@ -46,7 +46,6 @@ obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 
 # matches
-obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
 obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
 obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
 obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o

+ 1 - 0
net/ipv4/netfilter/ip_conntrack_proto_gre.c

@@ -32,6 +32,7 @@
 #include <linux/in.h>
 #include <linux/list.h>
 #include <linux/seq_file.h>
+#include <linux/interrupt.h>
 
 static DEFINE_RWLOCK(ip_ct_gre_lock);
 #define ASSERT_READ_LOCK(x)

+ 5 - 2
net/ipv4/netfilter/ipt_policy.c

@@ -95,7 +95,10 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
 static int match(const struct sk_buff *skb,
                  const struct net_device *in,
                  const struct net_device *out,
-                 const void *matchinfo, int offset, int *hotdrop)
+                 const void *matchinfo,
+                 int offset,
+                 unsigned int protoff,
+                 int *hotdrop)
 {
 	const struct ipt_policy_info *info = matchinfo;
 	int ret;
@@ -113,7 +116,7 @@ static int match(const struct sk_buff *skb,
 	return ret;
 }
 
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip_void,
                       void *matchinfo, unsigned int matchsize,
                       unsigned int hook_mask)
 {

+ 4 - 10
net/ipv4/route.c

@@ -240,9 +240,8 @@ static unsigned			rt_hash_mask;
 static int			rt_hash_log;
 static unsigned int		rt_hash_rnd;
 
-static struct rt_cache_stat *rt_cache_stat;
-#define RT_CACHE_STAT_INC(field)					  \
-		(per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
+static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
+#define RT_CACHE_STAT_INC(field) (__get_cpu_var(rt_cache_stat).field++)
 
 static int rt_intern_hash(unsigned hash, struct rtable *rth,
 				struct rtable **res);
@@ -401,7 +400,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
-		return per_cpu_ptr(rt_cache_stat, cpu);
+		return &per_cpu(rt_cache_stat, cpu);
 	}
 	return NULL;
 }
@@ -414,7 +413,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
-		return per_cpu_ptr(rt_cache_stat, cpu);
+		return &per_cpu(rt_cache_stat, cpu);
 	}
 	return NULL;
 	
@@ -3160,10 +3159,6 @@ int __init ip_rt_init(void)
 	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
 	ip_rt_max_size = (rt_hash_mask + 1) * 16;
 
-	rt_cache_stat = alloc_percpu(struct rt_cache_stat);
-	if (!rt_cache_stat)
-		return -ENOMEM;
-
 	devinet_init();
 	ip_fib_init();
 
@@ -3191,7 +3186,6 @@ int __init ip_rt_init(void)
 	if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
 	    !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, 
 			    		     proc_net_stat))) {
-		free_percpu(rt_cache_stat);
 		return -ENOMEM;
 	}
 	rtstat_pde->proc_fops = &rt_cpu_seq_fops;
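
The routing-cache statistics move from a runtime alloc_percpu() area to a statically defined per-CPU variable, which is why the -ENOMEM path and the free_percpu() call disappear from ip_rt_init(). A rough userspace analogue of per-CPU counters, using a fixed array indexed by CPU id in place of the kernel's DEFINE_PER_CPU/__get_cpu_var machinery (names and the CPU count are illustrative):

#include <stdio.h>

#define NR_CPUS 4

struct rt_cache_stat { unsigned long in_hit; };

/* Static per-CPU analogue: one slot per CPU, no runtime allocation to fail. */
static struct rt_cache_stat per_cpu_stat[NR_CPUS];

#define STAT_INC(cpu, field) (per_cpu_stat[cpu].field++)

int main(void)
{
	int cpu;

	STAT_INC(0, in_hit);
	STAT_INC(0, in_hit);
	STAT_INC(2, in_hit);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d in_hit=%lu\n", cpu, per_cpu_stat[cpu].in_hit);
	return 0;
}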

+ 1 - 1
net/ipv6/addrconf.c

@@ -2644,7 +2644,7 @@ static int if6_seq_show(struct seq_file *seq, void *v)
 {
 	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
 	seq_printf(seq,
-		   NIP6_FMT " %02x %02x %02x %02x %8s\n",
+		   NIP6_SEQFMT " %02x %02x %02x %02x %8s\n",
 		   NIP6(ifp->addr),
 		   ifp->idev->dev->ifindex,
 		   ifp->prefix_len,

+ 1 - 1
net/ipv6/anycast.c

@@ -532,7 +532,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v)
 	struct ac6_iter_state *state = ac6_seq_private(seq);
 
 	seq_printf(seq,
-		   "%-4d %-15s " NIP6_FMT " %5d\n",
+		   "%-4d %-15s " NIP6_SEQFMT " %5d\n",
 		   state->dev->ifindex, state->dev->name,
 		   NIP6(im->aca_addr),
 		   im->aca_users);

+ 2 - 2
net/ipv6/ip6_flowlabel.c

@@ -629,7 +629,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
 {
 	while(fl) {
 		seq_printf(seq,
-			   "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_FMT " %-4d\n",
+			   "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_SEQFMT " %-4d\n",
 			   (unsigned)ntohl(fl->label),
 			   fl->share,
 			   (unsigned)fl->owner,
@@ -645,7 +645,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
 static int ip6fl_seq_show(struct seq_file *seq, void *v)
 {
 	if (v == SEQ_START_TOKEN)
-		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-39s %s\n",
+		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
 			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
 	else
 		ip6fl_fl_seq_show(seq, v);

+ 3 - 3
net/ipv6/mcast.c

@@ -2373,7 +2373,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
 	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
 
 	seq_printf(seq,
-		   "%-4d %-15s " NIP6_FMT " %5d %08X %ld\n", 
+		   "%-4d %-15s " NIP6_SEQFMT " %5d %08X %ld\n", 
 		   state->dev->ifindex, state->dev->name,
 		   NIP6(im->mca_addr),
 		   im->mca_users, im->mca_flags,
@@ -2542,12 +2542,12 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
 	if (v == SEQ_START_TOKEN) {
 		seq_printf(seq, 
 			   "%3s %6s "
-			   "%39s %39s %6s %6s\n", "Idx",
+			   "%32s %32s %6s %6s\n", "Idx",
 			   "Device", "Multicast Address",
 			   "Source Address", "INC", "EXC");
 	} else {
 		seq_printf(seq,
-			   "%3d %6.6s " NIP6_FMT " " NIP6_FMT " %6lu %6lu\n",
+			   "%3d %6.6s " NIP6_SEQFMT " " NIP6_SEQFMT " %6lu %6lu\n",
 			   state->dev->ifindex, state->dev->name,
 			   NIP6(state->im->mca_addr),
 			   NIP6(psf->sf_addr),

+ 0 - 1
net/ipv6/netfilter/Makefile

@@ -4,7 +4,6 @@
 
 # Link order matters here.
 obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
-obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
 obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o

+ 75 - 76
net/ipv6/netfilter/ip6t_dst.c

@@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 #endif
 
 /*
- * (Type & 0xC0) >> 6
- * 	0	-> ignorable
- * 	1	-> must drop the packet
- * 	2	-> send ICMP PARM PROB regardless and drop packet
- * 	3	-> Send ICMP if not a multicast address and drop packet
+ *  (Type & 0xC0) >> 6
+ *	0	-> ignorable
+ *	1	-> must drop the packet
+ *	2	-> send ICMP PARM PROB regardless and drop packet
+ *	3	-> Send ICMP if not a multicast address and drop packet
  *  (Type & 0x20) >> 5
- *  	0	-> invariant
- *  	1	-> can change the routing
+ *	0	-> invariant
+ *	1	-> can change the routing
  *  (Type & 0x1F) Type
- *      0	-> Pad1 (only 1 byte!)
- *      1	-> PadN LENGTH info (total length = length + 2)
- *      C0 | 2	-> JUMBO 4 x x x x ( xxxx > 64k )
- *      5	-> RTALERT 2 x x
+ *	0	-> Pad1 (only 1 byte!)
+ *	1	-> PadN LENGTH info (total length = length + 2)
+ *	C0 | 2	-> JUMBO 4 x x x x ( xxxx > 64k )
+ *	5	-> RTALERT 2 x x
  */
 
 static int
@@ -60,16 +60,16 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct ipv6_opt_hdr _optsh, *oh;
-       const struct ip6t_opts *optinfo = matchinfo;
-       unsigned int temp;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int ret = 0;
-       u8 _opttype, *tp = NULL;
-       u8 _optlen, *lp = NULL;
-       unsigned int optlen;
-       
+	struct ipv6_opt_hdr _optsh, *oh;
+	const struct ip6t_opts *optinfo = matchinfo;
+	unsigned int temp;
+	unsigned int ptr;
+	unsigned int hdrlen = 0;
+	unsigned int ret = 0;
+	u8 _opttype, *tp = NULL;
+	u8 _optlen, *lp = NULL;
+	unsigned int optlen;
+
 #if HOPBYHOP
 	if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0)
 #else
@@ -77,42 +77,41 @@ match(const struct sk_buff *skb,
 #endif
 		return 0;
 
-       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
-       if (oh == NULL){
-	       *hotdrop = 1;
-       		return 0;
-       }
-
-       hdrlen = ipv6_optlen(oh);
-       if (skb->len - ptr < hdrlen){
-	       /* Packet smaller than it's length field */
-       		return 0;
-       }
-
-       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
-
-       DEBUGP("len %02X %04X %02X ",
-       		optinfo->hdrlen, hdrlen,
-       		(!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
-
-       ret = (oh != NULL)
-       		&&
-	      	(!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
-
-       ptr += 2;
-       hdrlen -= 2;
-       if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){
-	       return ret;
+	oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
+	if (oh == NULL) {
+		*hotdrop = 1;
+		return 0;
+	}
+
+	hdrlen = ipv6_optlen(oh);
+	if (skb->len - ptr < hdrlen) {
+		/* Packet smaller than its length field */
+		return 0;
+	}
+
+	DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
+
+	DEBUGP("len %02X %04X %02X ",
+	       optinfo->hdrlen, hdrlen,
+	       (!(optinfo->flags & IP6T_OPTS_LEN) ||
+		((optinfo->hdrlen == hdrlen) ^
+		 !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
+
+	ret = (oh != NULL) &&
+	      (!(optinfo->flags & IP6T_OPTS_LEN) ||
+	       ((optinfo->hdrlen == hdrlen) ^
+		!!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
+
+	ptr += 2;
+	hdrlen -= 2;
+	if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
+		return ret;
 	} else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
 		DEBUGP("Not strict - not implemented");
 	} else {
 		DEBUGP("Strict ");
-		DEBUGP("#%d ",optinfo->optsnr);
-		for(temp=0; temp<optinfo->optsnr; temp++){
+		DEBUGP("#%d ", optinfo->optsnr);
+		for (temp = 0; temp < optinfo->optsnr; temp++) {
 			/* type field exists ? */
 			if (hdrlen < 1)
 				break;
@@ -122,10 +121,10 @@ match(const struct sk_buff *skb,
 				break;
 
 			/* Type check */
-			if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){
+			if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) {
 				DEBUGP("Tbad %02X %02X\n",
 				       *tp,
-				       (optinfo->opts[temp] & 0xFF00)>>8);
+				       (optinfo->opts[temp] & 0xFF00) >> 8);
 				return 0;
 			} else {
 				DEBUGP("Tok ");
@@ -169,7 +168,8 @@ match(const struct sk_buff *skb,
 		}
 		if (temp == optinfo->optsnr)
 			return ret;
-		else return 0;
+		else
+			return 0;
 	}
 
 	return 0;
@@ -178,25 +178,24 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *info,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+	   const void *info,
+	   void *matchinfo,
+	   unsigned int matchinfosize,
+	   unsigned int hook_mask)
 {
-       const struct ip6t_opts *optsinfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
-              DEBUGP("ip6t_opts: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
-              return 0;
-       }
-       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
-              DEBUGP("ip6t_opts: unknown flags %X\n",
-                      optsinfo->invflags);
-              return 0;
-       }
-
-       return 1;
+	const struct ip6t_opts *optsinfo = matchinfo;
+
+	if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
+		DEBUGP("ip6t_opts: matchsize %u != %u\n",
+		       matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
+		return 0;
+	}
+	if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
+		DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
+		return 0;
+	}
+
+	return 1;
 }
 
 static struct ip6t_match opts_match = {
@@ -212,12 +211,12 @@ static struct ip6t_match opts_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&opts_match);
+	return ip6t_register_match(&opts_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&opts_match);
+	ip6t_unregister_match(&opts_match);
 }
 
 module_init(init);
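
The reindented comment at the top of ip6t_dst.c documents how an IPv6 option type byte is laid out: the two high bits say what to do with an unrecognized option, bit 0x20 says whether the option data is invariant in transit, and the low five bits are the option number. A small decoder illustrating that layout; the inputs are the examples listed in the comment (Pad1, PadN, Jumbo, Router Alert):

#include <stdio.h>

static void decode_opt_type(unsigned char type)
{
	static const char *action[] = {
		"skip over",				/* 00 */
		"discard",				/* 01 */
		"discard + ICMP Parameter Problem",	/* 10 */
		"discard + ICMP unless multicast",	/* 11 */
	};

	printf("type 0x%02x: unrecognized -> %s, %s, option number %u\n",
	       type,
	       action[(type & 0xC0) >> 6],
	       (type & 0x20) ? "data may change en route" : "data invariant",
	       type & 0x1F);
}

int main(void)
{
	decode_opt_type(0x00);	/* Pad1 */
	decode_opt_type(0x01);	/* PadN */
	decode_opt_type(0xC2);	/* Jumbo payload */
	decode_opt_type(0x05);	/* Router alert */
	return 0;
}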

+ 34 - 34
net/ipv6/netfilter/ip6t_eui64.c

@@ -27,45 +27,45 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
+	unsigned char eui64[8];
+	int i = 0;
 
-    unsigned char eui64[8];
-    int i=0;
-
-     if ( !(skb->mac.raw >= skb->head
-                && (skb->mac.raw + ETH_HLEN) <= skb->data)
-                && offset != 0) {
-                        *hotdrop = 1;
-                        return 0;
-                }
-    
-    memset(eui64, 0, sizeof(eui64));
-
-    if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
-      if (skb->nh.ipv6h->version == 0x6) { 
-         memcpy(eui64, eth_hdr(skb)->h_source, 3);
-         memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
-	 eui64[3]=0xff;
-	 eui64[4]=0xfe;
-	 eui64[0] |= 0x02;
-
-	 i=0;
-	 while ((skb->nh.ipv6h->saddr.s6_addr[8+i] ==
-			 eui64[i]) && (i<8)) i++;
-
-	 if ( i == 8 )
-	 	return 1;
-      }
-    }
-
-    return 0;
+	if (!(skb->mac.raw >= skb->head &&
+	      (skb->mac.raw + ETH_HLEN) <= skb->data) &&
+	    offset != 0) {
+		*hotdrop = 1;
+		return 0;
+	}
+
+	memset(eui64, 0, sizeof(eui64));
+
+	if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
+		if (skb->nh.ipv6h->version == 0x6) {
+			memcpy(eui64, eth_hdr(skb)->h_source, 3);
+			memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
+			eui64[3] = 0xff;
+			eui64[4] = 0xfe;
+			eui64[0] |= 0x02;
+
+			i = 0;
+			while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == eui64[i])
+			       && (i < 8))
+				i++;
+
+			if (i == 8)
+				return 1;
+		}
+	}
+
+	return 0;
 }
 
 static int
 ip6t_eui64_checkentry(const char *tablename,
-		   const void  *ip,
-		   void *matchinfo,
-		   unsigned int matchsize,
-		   unsigned int hook_mask)
+		      const void *ip,
+		      void *matchinfo,
+		      unsigned int matchsize,
+		      unsigned int hook_mask)
 {
 	if (hook_mask
 	    & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) |
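
The rewritten match in ip6t_eui64.c rebuilds a modified EUI-64 interface identifier from the Ethernet source address (upper three bytes, ff:fe in the middle, lower three bytes, universal/local bit set) and compares it with bytes 8-15 of the IPv6 source address. A standalone sketch of that construction step, using a made-up MAC address:

#include <stdio.h>
#include <string.h>

/* Build a modified EUI-64 interface identifier from a 48-bit MAC: upper
 * three bytes, then ff:fe, then the lower three bytes, with the
 * universal/local bit set the same way the match code sets it. */
static void mac_to_eui64(const unsigned char mac[6], unsigned char eui64[8])
{
	memcpy(eui64, mac, 3);
	eui64[3] = 0xff;
	eui64[4] = 0xfe;
	memcpy(eui64 + 5, mac + 3, 3);
	eui64[0] |= 0x02;	/* universal/local bit */
}

int main(void)
{
	/* illustrative MAC, not taken from the patch */
	unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char eui64[8];
	int i;

	mac_to_eui64(mac, eui64);
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui64[i], i == 7 ? "\n" : ":");
	return 0;
}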

+ 78 - 79
net/ipv6/netfilter/ip6t_frag.c

@@ -31,12 +31,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 static inline int
 id_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert)
 {
-       int r=0;
-       DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
-              min,id,max);
-       r=(id >= min && id <= max) ^ invert;
-       DEBUGP(" result %s\n",r? "PASS" : "FAILED");
-       return r;
+	int r = 0;
+	DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
+	       min, id, max);
+	r = (id >= min && id <= max) ^ invert;
+	DEBUGP(" result %s\n", r ? "PASS" : "FAILED");
+	return r;
 }
 
 static int
@@ -48,92 +48,91 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct frag_hdr _frag, *fh;
-       const struct ip6t_frag *fraginfo = matchinfo;
-       unsigned int ptr;
+	struct frag_hdr _frag, *fh;
+	const struct ip6t_frag *fraginfo = matchinfo;
+	unsigned int ptr;
 
 	if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL) < 0)
 		return 0;
 
 	fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
-	if (fh == NULL){
+	if (fh == NULL) {
 		*hotdrop = 1;
 		return 0;
 	}
 
-       DEBUGP("INFO %04X ", fh->frag_off);
-       DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
-       DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6);
-       DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF));
-       DEBUGP("ID %u %08X\n", ntohl(fh->identification),
-	      ntohl(fh->identification));
-
-       DEBUGP("IPv6 FRAG id %02X ",
-       		(id_match(fraginfo->ids[0], fraginfo->ids[1],
-                           ntohl(fh->identification),
-                           !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))));
-       DEBUGP("res %02X %02X%04X %02X ", 
-       		(fraginfo->flags & IP6T_FRAG_RES), fh->reserved,
-		ntohs(fh->frag_off) & 0x6,
-       		!((fraginfo->flags & IP6T_FRAG_RES)
-			&& (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
-       DEBUGP("first %02X %02X %02X ", 
-       		(fraginfo->flags & IP6T_FRAG_FST),
-		ntohs(fh->frag_off) & ~0x7,
-       		!((fraginfo->flags & IP6T_FRAG_FST)
-			&& (ntohs(fh->frag_off) & ~0x7)));
-       DEBUGP("mf %02X %02X %02X ", 
-       		(fraginfo->flags & IP6T_FRAG_MF),
-		ntohs(fh->frag_off) & IP6_MF,
-       		!((fraginfo->flags & IP6T_FRAG_MF)
-			&& !((ntohs(fh->frag_off) & IP6_MF))));
-       DEBUGP("last %02X %02X %02X\n", 
-       		(fraginfo->flags & IP6T_FRAG_NMF),
-		ntohs(fh->frag_off) & IP6_MF,
-       		!((fraginfo->flags & IP6T_FRAG_NMF)
-			&& (ntohs(fh->frag_off) & IP6_MF)));
-
-       return (fh != NULL)
-       		&&
-       		(id_match(fraginfo->ids[0], fraginfo->ids[1],
-			  ntohl(fh->identification),
-                           !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))
-		&&
-		!((fraginfo->flags & IP6T_FRAG_RES)
-			&& (fh->reserved || (ntohs(fh->frag_off) & 0x6)))
-		&&
-		!((fraginfo->flags & IP6T_FRAG_FST)
-			&& (ntohs(fh->frag_off) & ~0x7))
-		&&
-		!((fraginfo->flags & IP6T_FRAG_MF)
-			&& !(ntohs(fh->frag_off) & IP6_MF))
-		&&
-		!((fraginfo->flags & IP6T_FRAG_NMF)
-			&& (ntohs(fh->frag_off) & IP6_MF));
+	DEBUGP("INFO %04X ", fh->frag_off);
+	DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
+	DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6);
+	DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF));
+	DEBUGP("ID %u %08X\n", ntohl(fh->identification),
+	       ntohl(fh->identification));
+
+	DEBUGP("IPv6 FRAG id %02X ",
+	       (id_match(fraginfo->ids[0], fraginfo->ids[1],
+			 ntohl(fh->identification),
+			 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))));
+	DEBUGP("res %02X %02X%04X %02X ",
+	       (fraginfo->flags & IP6T_FRAG_RES), fh->reserved,
+	       ntohs(fh->frag_off) & 0x6,
+	       !((fraginfo->flags & IP6T_FRAG_RES)
+		 && (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
+	DEBUGP("first %02X %02X %02X ",
+	       (fraginfo->flags & IP6T_FRAG_FST),
+	       ntohs(fh->frag_off) & ~0x7,
+	       !((fraginfo->flags & IP6T_FRAG_FST)
+		 && (ntohs(fh->frag_off) & ~0x7)));
+	DEBUGP("mf %02X %02X %02X ",
+	       (fraginfo->flags & IP6T_FRAG_MF),
+	       ntohs(fh->frag_off) & IP6_MF,
+	       !((fraginfo->flags & IP6T_FRAG_MF)
+		 && !((ntohs(fh->frag_off) & IP6_MF))));
+	DEBUGP("last %02X %02X %02X\n",
+	       (fraginfo->flags & IP6T_FRAG_NMF),
+	       ntohs(fh->frag_off) & IP6_MF,
+	       !((fraginfo->flags & IP6T_FRAG_NMF)
+		 && (ntohs(fh->frag_off) & IP6_MF)));
+
+	return (fh != NULL)
+	       &&
+	       (id_match(fraginfo->ids[0], fraginfo->ids[1],
+			 ntohl(fh->identification),
+			 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))
+	       &&
+	       !((fraginfo->flags & IP6T_FRAG_RES)
+		 && (fh->reserved || (ntohs(fh->frag_off) & 0x6)))
+	       &&
+	       !((fraginfo->flags & IP6T_FRAG_FST)
+		 && (ntohs(fh->frag_off) & ~0x7))
+	       &&
+	       !((fraginfo->flags & IP6T_FRAG_MF)
+		 && !(ntohs(fh->frag_off) & IP6_MF))
+	       &&
+	       !((fraginfo->flags & IP6T_FRAG_NMF)
+		 && (ntohs(fh->frag_off) & IP6_MF));
 }
 
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *ip,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+	   const void *ip,
+	   void *matchinfo,
+	   unsigned int matchinfosize,
+	   unsigned int hook_mask)
 {
-       const struct ip6t_frag *fraginfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) {
-              DEBUGP("ip6t_frag: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag)));
-              return 0;
-       }
-       if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
-              DEBUGP("ip6t_frag: unknown flags %X\n",
-                      fraginfo->invflags);
-              return 0;
-       }
-
-       return 1;
+	const struct ip6t_frag *fraginfo = matchinfo;
+
+	if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) {
+		DEBUGP("ip6t_frag: matchsize %u != %u\n",
+		       matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag)));
+		return 0;
+	}
+	if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
+		DEBUGP("ip6t_frag: unknown flags %X\n", fraginfo->invflags);
+		return 0;
+	}
+
+	return 1;
 }
 
 static struct ip6t_match frag_match = {
@@ -145,12 +144,12 @@ static struct ip6t_match frag_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&frag_match);
+	return ip6t_register_match(&frag_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&frag_match);
+	ip6t_unregister_match(&frag_match);
 }
 
 module_init(init);

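Editor's note: the DEBUGP lines and the final return expression above all key off the same three pieces of the fragment header's frag_off field -- the 13-bit offset, the two reserved bits, and the more-fragments flag. A sketch of splitting a host-order frag_off into those pieces (names are illustrative; EX_IP6_MF mirrors the standard IP6_MF bit):

	#include <stdint.h>

	#define EX_IP6_MF 0x0001	/* more-fragments bit, like IP6_MF */

	/* frag_off is the fragment header's offset/flags field after ntohs(). */
	static void split_frag_off(uint16_t frag_off,
				   uint16_t *offset, uint16_t *reserved, int *more)
	{
		*offset   = frag_off & ~0x7;		/* fragment offset, in 8-byte units */
		*reserved = frag_off & 0x6;		/* the two bits checked by IP6T_FRAG_RES */
		*more     = !!(frag_off & EX_IP6_MF);	/* more fragments follow */
	}
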
+ 75 - 76
net/ipv6/netfilter/ip6t_hbh.c

@@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 #endif
 
 /*
- * (Type & 0xC0) >> 6
- * 	0	-> ignorable
- * 	1	-> must drop the packet
- * 	2	-> send ICMP PARM PROB regardless and drop packet
- * 	3	-> Send ICMP if not a multicast address and drop packet
+ *  (Type & 0xC0) >> 6
+ *	0	-> ignorable
+ *	1	-> must drop the packet
+ *	2	-> send ICMP PARM PROB regardless and drop packet
+ *	3	-> Send ICMP if not a multicast address and drop packet
  *  (Type & 0x20) >> 5
- *  	0	-> invariant
- *  	1	-> can change the routing
+ *	0	-> invariant
+ *	1	-> can change the routing
  *  (Type & 0x1F) Type
- *      0	-> Pad1 (only 1 byte!)
- *      1	-> PadN LENGTH info (total length = length + 2)
- *      C0 | 2	-> JUMBO 4 x x x x ( xxxx > 64k )
- *      5	-> RTALERT 2 x x
+ *	0	-> Pad1 (only 1 byte!)
+ *	1	-> PadN LENGTH info (total length = length + 2)
+ *	C0 | 2	-> JUMBO 4 x x x x ( xxxx > 64k )
+ *	5	-> RTALERT 2 x x
  */
 
 static int
@@ -60,16 +60,16 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct ipv6_opt_hdr _optsh, *oh;
-       const struct ip6t_opts *optinfo = matchinfo;
-       unsigned int temp;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int ret = 0;
-       u8 _opttype, *tp = NULL;
-       u8 _optlen, *lp = NULL;
-       unsigned int optlen;
-       
+	struct ipv6_opt_hdr _optsh, *oh;
+	const struct ip6t_opts *optinfo = matchinfo;
+	unsigned int temp;
+	unsigned int ptr;
+	unsigned int hdrlen = 0;
+	unsigned int ret = 0;
+	u8 _opttype, *tp = NULL;
+	u8 _optlen, *lp = NULL;
+	unsigned int optlen;
+
 #if HOPBYHOP
 	if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0)
 #else
@@ -77,42 +77,41 @@ match(const struct sk_buff *skb,
 #endif
 		return 0;
 
-       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
-       if (oh == NULL){
-	       *hotdrop = 1;
-       		return 0;
-       }
-
-       hdrlen = ipv6_optlen(oh);
-       if (skb->len - ptr < hdrlen){
-	       /* Packet smaller than it's length field */
-       		return 0;
-       }
-
-       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
-
-       DEBUGP("len %02X %04X %02X ",
-       		optinfo->hdrlen, hdrlen,
-       		(!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
-
-       ret = (oh != NULL)
-       		&&
-	      	(!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
-
-       ptr += 2;
-       hdrlen -= 2;
-       if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){
-	       return ret;
+	oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
+	if (oh == NULL) {
+		*hotdrop = 1;
+		return 0;
+	}
+
+	hdrlen = ipv6_optlen(oh);
+	if (skb->len - ptr < hdrlen) {
+		/* Packet smaller than its length field */
+		return 0;
+	}
+
+	DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
+
+	DEBUGP("len %02X %04X %02X ",
+	       optinfo->hdrlen, hdrlen,
+	       (!(optinfo->flags & IP6T_OPTS_LEN) ||
+		((optinfo->hdrlen == hdrlen) ^
+		 !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
+
+	ret = (oh != NULL) &&
+	      (!(optinfo->flags & IP6T_OPTS_LEN) ||
+	       ((optinfo->hdrlen == hdrlen) ^
+		!!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
+
+	ptr += 2;
+	hdrlen -= 2;
+	if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
+		return ret;
 	} else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
 		DEBUGP("Not strict - not implemented");
 	} else {
 		DEBUGP("Strict ");
-		DEBUGP("#%d ",optinfo->optsnr);
-		for(temp=0; temp<optinfo->optsnr; temp++){
+		DEBUGP("#%d ", optinfo->optsnr);
+		for (temp = 0; temp < optinfo->optsnr; temp++) {
 			/* type field exists ? */
 			if (hdrlen < 1)
 				break;
@@ -122,10 +121,10 @@ match(const struct sk_buff *skb,
 				break;
 
 			/* Type check */
-			if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){
+			if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) {
 				DEBUGP("Tbad %02X %02X\n",
 				       *tp,
-				       (optinfo->opts[temp] & 0xFF00)>>8);
+				       (optinfo->opts[temp] & 0xFF00) >> 8);
 				return 0;
 			} else {
 				DEBUGP("Tok ");
@@ -169,7 +168,8 @@ match(const struct sk_buff *skb,
 		}
 		if (temp == optinfo->optsnr)
 			return ret;
-		else return 0;
+		else
+			return 0;
 	}
 
 	return 0;
@@ -178,25 +178,24 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *entry,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+	   const void *entry,
+	   void *matchinfo,
+	   unsigned int matchinfosize,
+	   unsigned int hook_mask)
 {
-       const struct ip6t_opts *optsinfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
-              DEBUGP("ip6t_opts: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
-              return 0;
-       }
-       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
-              DEBUGP("ip6t_opts: unknown flags %X\n",
-                      optsinfo->invflags);
-              return 0;
-       }
-
-       return 1;
+	const struct ip6t_opts *optsinfo = matchinfo;
+
+	if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
+		DEBUGP("ip6t_opts: matchsize %u != %u\n",
+		       matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
+		return 0;
+	}
+	if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
+		DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
+		return 0;
+	}
+
+	return 1;
 }
 
 static struct ip6t_match opts_match = {
@@ -212,12 +211,12 @@ static struct ip6t_match opts_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&opts_match);
+	return ip6t_register_match(&opts_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&opts_match);
+	ip6t_unregister_match(&opts_match);
 }
 
 module_init(init);

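Editor's note: the strict-mode loop in the hop-by-hop match walks the option TLVs one by one -- Pad1 (type 0) is a single byte, every other option is a type byte, a length byte, and 'length' bytes of data, which is why the kernel code advances by optlen + 2. A simplified walk over an already-extracted option area; this is a sketch, not the kernel's parser, and it assumes the 2-byte extension header has already been skipped:

	#include <stddef.h>

	/* Count the option TLVs in an IPv6 hop-by-hop/destination options area.
	 * Returns -1 if an option claims more data than the buffer holds. */
	static int count_ipv6_options(const unsigned char *opt, size_t len)
	{
		size_t off = 0;
		int count = 0;

		while (off < len) {
			if (opt[off] == 0) {			/* Pad1: one byte, no length field */
				off++;
			} else {
				if (off + 2 > len || off + 2 + opt[off + 1] > len)
					return -1;		/* truncated option */
				off += 2 + opt[off + 1];	/* type + length + data */
			}
			count++;
		}
		return count;
	}
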
+ 39 - 40
net/ipv6/netfilter/ip6t_ipv6header.c

@@ -50,20 +50,20 @@ ipv6header_match(const struct sk_buff *skb,
 	len = skb->len - ptr;
 	temp = 0;
 
-        while (ip6t_ext_hdr(nexthdr)) {
+	while (ip6t_ext_hdr(nexthdr)) {
 		struct ipv6_opt_hdr _hdr, *hp;
-        	int hdrlen;
+		int hdrlen;
 
 		/* Is there enough space for the next ext header? */
-                if (len < (int)sizeof(struct ipv6_opt_hdr))
-                        return 0;
+		if (len < (int)sizeof(struct ipv6_opt_hdr))
+			return 0;
 		/* No more exthdr -> evaluate */
-                if (nexthdr == NEXTHDR_NONE) {
+		if (nexthdr == NEXTHDR_NONE) {
 			temp |= MASK_NONE;
 			break;
 		}
 		/* ESP -> evaluate */
-                if (nexthdr == NEXTHDR_ESP) {
+		if (nexthdr == NEXTHDR_ESP) {
 			temp |= MASK_ESP;
 			break;
 		}
@@ -72,43 +72,43 @@ ipv6header_match(const struct sk_buff *skb,
 		BUG_ON(hp == NULL);
 
 		/* Calculate the header length */
-                if (nexthdr == NEXTHDR_FRAGMENT) {
-                        hdrlen = 8;
-                } else if (nexthdr == NEXTHDR_AUTH)
-                        hdrlen = (hp->hdrlen+2)<<2;
-                else
-                        hdrlen = ipv6_optlen(hp);
+		if (nexthdr == NEXTHDR_FRAGMENT) {
+			hdrlen = 8;
+		} else if (nexthdr == NEXTHDR_AUTH)
+			hdrlen = (hp->hdrlen + 2) << 2;
+		else
+			hdrlen = ipv6_optlen(hp);
 
 		/* set the flag */
-		switch (nexthdr){
-			case NEXTHDR_HOP:
-				temp |= MASK_HOPOPTS;
-				break;
-			case NEXTHDR_ROUTING:
-				temp |= MASK_ROUTING;
-				break;
-			case NEXTHDR_FRAGMENT:
-				temp |= MASK_FRAGMENT;
-				break;
-			case NEXTHDR_AUTH:
-				temp |= MASK_AH;
-				break;
-			case NEXTHDR_DEST:
-				temp |= MASK_DSTOPTS;
-				break;
-			default:
-				return 0;
-				break;
+		switch (nexthdr) {
+		case NEXTHDR_HOP:
+			temp |= MASK_HOPOPTS;
+			break;
+		case NEXTHDR_ROUTING:
+			temp |= MASK_ROUTING;
+			break;
+		case NEXTHDR_FRAGMENT:
+			temp |= MASK_FRAGMENT;
+			break;
+		case NEXTHDR_AUTH:
+			temp |= MASK_AH;
+			break;
+		case NEXTHDR_DEST:
+			temp |= MASK_DSTOPTS;
+			break;
+		default:
+			return 0;
+			break;
 		}
 
-                nexthdr = hp->nexthdr;
-                len -= hdrlen;
-                ptr += hdrlen;
+		nexthdr = hp->nexthdr;
+		len -= hdrlen;
+		ptr += hdrlen;
 		if (ptr > skb->len)
 			break;
-        }
+	}
 
-	if ( (nexthdr != NEXTHDR_NONE ) && (nexthdr != NEXTHDR_ESP) )
+	if ((nexthdr != NEXTHDR_NONE) && (nexthdr != NEXTHDR_ESP))
 		temp |= MASK_PROTO;
 
 	if (info->modeflag)
@@ -137,8 +137,8 @@ ipv6header_checkentry(const char *tablename,
 		return 0;
 
 	/* invflags is 0 or 0xff in hard mode */
-	if ((!info->modeflag) && info->invflags != 0x00
-			      && info->invflags != 0xFF)
+	if ((!info->modeflag) && info->invflags != 0x00 &&
+	    info->invflags != 0xFF)
 		return 0;
 
 	return 1;
@@ -152,7 +152,7 @@ static struct ip6t_match ip6t_ipv6header_match = {
 	.me		= THIS_MODULE,
 };
 
-static int  __init ipv6header_init(void)
+static int __init ipv6header_init(void)
 {
 	return ip6t_register_match(&ip6t_ipv6header_match);
 }
@@ -164,4 +164,3 @@ static void __exit ipv6header_exit(void)
 
 module_init(ipv6header_init);
 module_exit(ipv6header_exit);
-

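Editor's note: walking the extension-header chain above needs each header's length, and the rule differs by header -- the fragment header is a fixed 8 bytes, the authentication header stores its length in 4-byte units less two, and the generic case is (hdrlen + 1) * 8, which is what ipv6_optlen() computes. A sketch of that calculation, using the standard next-header protocol numbers under illustrative names:

	#define EX_NEXTHDR_FRAGMENT	44	/* IPv6 fragment header */
	#define EX_NEXTHDR_AUTH		51	/* authentication header */

	/* Length in bytes of an extension header, given its next-header value
	 * and the hdrlen byte from the header itself. */
	static int ext_hdr_len(int nexthdr, unsigned char hdrlen_field)
	{
		if (nexthdr == EX_NEXTHDR_FRAGMENT)
			return 8;
		if (nexthdr == EX_NEXTHDR_AUTH)
			return (hdrlen_field + 2) << 2;	/* counted in 4-byte units */
		return (hdrlen_field + 1) << 3;		/* generic: 8-byte units */
	}
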
+ 14 - 14
net/ipv6/netfilter/ip6t_owner.c

@@ -36,14 +36,14 @@ match(const struct sk_buff *skb,
 	if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
 		return 0;
 
-	if(info->match & IP6T_OWNER_UID) {
-		if((skb->sk->sk_socket->file->f_uid != info->uid) ^
+	if (info->match & IP6T_OWNER_UID) {
+		if ((skb->sk->sk_socket->file->f_uid != info->uid) ^
 		    !!(info->invert & IP6T_OWNER_UID))
 			return 0;
 	}
 
-	if(info->match & IP6T_OWNER_GID) {
-		if((skb->sk->sk_socket->file->f_gid != info->gid) ^
+	if (info->match & IP6T_OWNER_GID) {
+		if ((skb->sk->sk_socket->file->f_gid != info->gid) ^
 		    !!(info->invert & IP6T_OWNER_GID))
 			return 0;
 	}
@@ -53,23 +53,23 @@ match(const struct sk_buff *skb,
 
 static int
 checkentry(const char *tablename,
-           const void  *ip,
-           void *matchinfo,
-           unsigned int matchsize,
-           unsigned int hook_mask)
+	   const void *ip,
+	   void *matchinfo,
+	   unsigned int matchsize,
+	   unsigned int hook_mask)
 {
 	const struct ip6t_owner_info *info = matchinfo;
 
-        if (hook_mask
-            & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) {
-                printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n");
-                return 0;
-        }
+	if (hook_mask
+	    & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) {
+		printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n");
+		return 0;
+	}
 
 	if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_owner_info)))
 		return 0;
 
-	if (info->match & (IP6T_OWNER_PID|IP6T_OWNER_SID)) {
+	if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) {
 		printk("ipt_owner: pid and sid matching "
 		       "not supported anymore\n");
 		return 0;

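Editor's note: the uid/gid checks above use the usual netfilter inversion idiom -- XOR the mismatch result with the normalized invert flag and bail out when it is set. The same pattern in isolation (a sketch, with made-up names):

	/* Returns nonzero when 'value' equals 'wanted', or when it differs and
	 * the invert flag is set -- the (cond ^ !!invert) pattern from match(). */
	static int owner_field_matches(unsigned int value, unsigned int wanted,
				       unsigned int invert)
	{
		return !((value != wanted) ^ !!invert);
	}
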
+ 1 - 1
net/ipv6/netfilter/ip6t_policy.c

@@ -118,7 +118,7 @@ static int match(const struct sk_buff *skb,
 	return ret;
 }
 
-static int checkentry(const char *tablename, const struct ip6t_ip6 *ip,
+static int checkentry(const char *tablename, const void *ip_void,
                       void *matchinfo, unsigned int matchsize,
                       unsigned int hook_mask)
 {

+ 112 - 103
net/ipv6/netfilter/ip6t_rt.c

@@ -33,12 +33,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 static inline int
 segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert)
 {
-       int r=0;
-       DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
-              min,id,max);
-       r=(id >= min && id <= max) ^ invert;
-       DEBUGP(" result %s\n",r? "PASS" : "FAILED");
-       return r;
+	int r = 0;
+	DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",
+	       invert ? '!' : ' ', min, id, max);
+	r = (id >= min && id <= max) ^ invert;
+	DEBUGP(" result %s\n", r ? "PASS" : "FAILED");
+	return r;
 }
 
 static int
@@ -50,87 +50,93 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct ipv6_rt_hdr _route, *rh;
-       const struct ip6t_rt *rtinfo = matchinfo;
-       unsigned int temp;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int ret = 0;
-       struct in6_addr *ap, _addr;
+	struct ipv6_rt_hdr _route, *rh;
+	const struct ip6t_rt *rtinfo = matchinfo;
+	unsigned int temp;
+	unsigned int ptr;
+	unsigned int hdrlen = 0;
+	unsigned int ret = 0;
+	struct in6_addr *ap, _addr;
 
 	if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL) < 0)
 		return 0;
 
-       rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
-       if (rh == NULL){
-	       *hotdrop = 1;
-       		return 0;
-       }
-
-       hdrlen = ipv6_optlen(rh);
-       if (skb->len - ptr < hdrlen){
-	       /* Pcket smaller than its length field */
-       		return 0;
-       }
-
-       DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
-       DEBUGP("TYPE %04X ", rh->type);
-       DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
-
-       DEBUGP("IPv6 RT segsleft %02X ",
-       		(segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
-                           rh->segments_left,
-                           !!(rtinfo->invflags & IP6T_RT_INV_SGS))));
-       DEBUGP("type %02X %02X %02X ",
-       		rtinfo->rt_type, rh->type, 
-       		(!(rtinfo->flags & IP6T_RT_TYP) ||
-                           ((rtinfo->rt_type == rh->type) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
-       DEBUGP("len %02X %04X %02X ",
-       		rtinfo->hdrlen, hdrlen,
-       		(!(rtinfo->flags & IP6T_RT_LEN) ||
-                           ((rtinfo->hdrlen == hdrlen) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_LEN))));
-       DEBUGP("res %02X %02X %02X ", 
-       		(rtinfo->flags & IP6T_RT_RES), ((struct rt0_hdr *)rh)->reserved,
-       		!((rtinfo->flags & IP6T_RT_RES) && (((struct rt0_hdr *)rh)->reserved)));
-
-       ret = (rh != NULL)
-       		&&
-       		(segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
-                           rh->segments_left,
-                           !!(rtinfo->invflags & IP6T_RT_INV_SGS)))
-		&&
-	      	(!(rtinfo->flags & IP6T_RT_LEN) ||
-                           ((rtinfo->hdrlen == hdrlen) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_LEN)))
-		&&
-       		(!(rtinfo->flags & IP6T_RT_TYP) ||
-                           ((rtinfo->rt_type == rh->type) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_TYP)));
+	rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
+	if (rh == NULL) {
+		*hotdrop = 1;
+		return 0;
+	}
+
+	hdrlen = ipv6_optlen(rh);
+	if (skb->len - ptr < hdrlen) {
+		/* Packet smaller than its length field */
+		return 0;
+	}
+
+	DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
+	DEBUGP("TYPE %04X ", rh->type);
+	DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
+
+	DEBUGP("IPv6 RT segsleft %02X ",
+	       (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
+			       rh->segments_left,
+			       !!(rtinfo->invflags & IP6T_RT_INV_SGS))));
+	DEBUGP("type %02X %02X %02X ",
+	       rtinfo->rt_type, rh->type,
+	       (!(rtinfo->flags & IP6T_RT_TYP) ||
+		((rtinfo->rt_type == rh->type) ^
+		 !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
+	DEBUGP("len %02X %04X %02X ",
+	       rtinfo->hdrlen, hdrlen,
+	       (!(rtinfo->flags & IP6T_RT_LEN) ||
+		((rtinfo->hdrlen == hdrlen) ^
+		 !!(rtinfo->invflags & IP6T_RT_INV_LEN))));
+	DEBUGP("res %02X %02X %02X ",
+	       (rtinfo->flags & IP6T_RT_RES),
+	       ((struct rt0_hdr *)rh)->reserved,
+	       !((rtinfo->flags & IP6T_RT_RES) &&
+		 (((struct rt0_hdr *)rh)->reserved)));
+
+	ret = (rh != NULL)
+	      &&
+	      (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
+			      rh->segments_left,
+			      !!(rtinfo->invflags & IP6T_RT_INV_SGS)))
+	      &&
+	      (!(rtinfo->flags & IP6T_RT_LEN) ||
+	       ((rtinfo->hdrlen == hdrlen) ^
+		!!(rtinfo->invflags & IP6T_RT_INV_LEN)))
+	      &&
+	      (!(rtinfo->flags & IP6T_RT_TYP) ||
+	       ((rtinfo->rt_type == rh->type) ^
+		!!(rtinfo->invflags & IP6T_RT_INV_TYP)));
 
 	if (ret && (rtinfo->flags & IP6T_RT_RES)) {
 		u_int32_t *rp, _reserved;
 		rp = skb_header_pointer(skb,
-					ptr + offsetof(struct rt0_hdr, reserved),
-					sizeof(_reserved), &_reserved);
+					ptr + offsetof(struct rt0_hdr,
+						       reserved),
+					sizeof(_reserved),
+					&_reserved);
 
 		ret = (*rp == 0);
 	}
 
-	DEBUGP("#%d ",rtinfo->addrnr);
-       if ( !(rtinfo->flags & IP6T_RT_FST) ){
-	       return ret;
+	DEBUGP("#%d ", rtinfo->addrnr);
+	if (!(rtinfo->flags & IP6T_RT_FST)) {
+		return ret;
 	} else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
 		DEBUGP("Not strict ");
-		if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){
+		if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
 			DEBUGP("There isn't enough space\n");
 			return 0;
 		} else {
 			unsigned int i = 0;
 
-			DEBUGP("#%d ",rtinfo->addrnr);
-			for(temp=0; temp<(unsigned int)((hdrlen-8)/16); temp++){
+			DEBUGP("#%d ", rtinfo->addrnr);
+			for (temp = 0;
+			     temp < (unsigned int)((hdrlen - 8) / 16);
+			     temp++) {
 				ap = skb_header_pointer(skb,
 							ptr
 							+ sizeof(struct rt0_hdr)
@@ -141,24 +147,26 @@ match(const struct sk_buff *skb,
 				BUG_ON(ap == NULL);
 
 				if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
-					DEBUGP("i=%d temp=%d;\n",i,temp);
+					DEBUGP("i=%d temp=%d;\n", i, temp);
 					i++;
 				}
-				if (i==rtinfo->addrnr) break;
+				if (i == rtinfo->addrnr)
+					break;
 			}
 			DEBUGP("i=%d #%d\n", i, rtinfo->addrnr);
 			if (i == rtinfo->addrnr)
 				return ret;
-			else return 0;
+			else
+				return 0;
 		}
 	} else {
 		DEBUGP("Strict ");
-		if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){
+		if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
 			DEBUGP("There isn't enough space\n");
 			return 0;
 		} else {
-			DEBUGP("#%d ",rtinfo->addrnr);
-			for(temp=0; temp<rtinfo->addrnr; temp++){
+			DEBUGP("#%d ", rtinfo->addrnr);
+			for (temp = 0; temp < rtinfo->addrnr; temp++) {
 				ap = skb_header_pointer(skb,
 							ptr
 							+ sizeof(struct rt0_hdr)
@@ -171,9 +179,11 @@ match(const struct sk_buff *skb,
 					break;
 			}
 			DEBUGP("temp=%d #%d\n", temp, rtinfo->addrnr);
-			if ((temp == rtinfo->addrnr) && (temp == (unsigned int)((hdrlen-8)/16)))
+			if ((temp == rtinfo->addrnr) &&
+			    (temp == (unsigned int)((hdrlen - 8) / 16)))
 				return ret;
-			else return 0;
+			else
+				return 0;
 		}
 	}
 
@@ -183,32 +193,31 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *entry,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+	   const void *entry,
+	   void *matchinfo,
+	   unsigned int matchinfosize,
+	   unsigned int hook_mask)
 {
-       const struct ip6t_rt *rtinfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) {
-              DEBUGP("ip6t_rt: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt)));
-              return 0;
-       }
-       if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
-              DEBUGP("ip6t_rt: unknown flags %X\n",
-                      rtinfo->invflags);
-              return 0;
-       }
-       if ( (rtinfo->flags & (IP6T_RT_RES|IP6T_RT_FST_MASK)) && 
-		       (!(rtinfo->flags & IP6T_RT_TYP) || 
-		       (rtinfo->rt_type != 0) || 
-		       (rtinfo->invflags & IP6T_RT_INV_TYP)) ) {
-	      DEBUGP("`--rt-type 0' required before `--rt-0-*'");
-              return 0;
-       }
-
-       return 1;
+	const struct ip6t_rt *rtinfo = matchinfo;
+
+	if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) {
+		DEBUGP("ip6t_rt: matchsize %u != %u\n",
+		       matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt)));
+		return 0;
+	}
+	if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
+		DEBUGP("ip6t_rt: unknown flags %X\n", rtinfo->invflags);
+		return 0;
+	}
+	if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) &&
+	    (!(rtinfo->flags & IP6T_RT_TYP) ||
+	     (rtinfo->rt_type != 0) ||
+	     (rtinfo->invflags & IP6T_RT_INV_TYP))) {
+		DEBUGP("`--rt-type 0' required before `--rt-0-*'");
+		return 0;
+	}
+
+	return 1;
 }
 
 static struct ip6t_match rt_match = {
@@ -220,12 +229,12 @@ static struct ip6t_match rt_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&rt_match);
+	return ip6t_register_match(&rt_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&rt_match);
+	ip6t_unregister_match(&rt_match);
 }
 
 module_init(init);

+ 1 - 1
net/rxrpc/krxtimod.c

@@ -81,7 +81,7 @@ static int krxtimod(void *arg)
 
 	for (;;) {
 		unsigned long jif;
-		signed long timeout;
+		long timeout;
 
 		/* deal with the server being asked to die */
 		if (krxtimod_die) {

+ 6 - 6
net/rxrpc/proc.c

@@ -361,7 +361,7 @@ static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
 static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
 {
 	struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link);
-	signed long timeout;
+	long timeout;
 
 	/* display header on line 1 */
 	if (v == SEQ_START_TOKEN) {
@@ -373,8 +373,8 @@ static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
 	/* display one peer per line on subsequent lines */
 	timeout = 0;
 	if (!list_empty(&peer->timeout.link))
-		timeout = (signed long) peer->timeout.timo_jif -
-			(signed long) jiffies;
+		timeout = (long) peer->timeout.timo_jif -
+			(long) jiffies;
 
 	seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n",
 		   peer->trans->port,
@@ -468,7 +468,7 @@ static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
 static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
 {
 	struct rxrpc_connection *conn;
-	signed long timeout;
+	long timeout;
 
 	conn = list_entry(v, struct rxrpc_connection, proc_link);
 
@@ -484,8 +484,8 @@ static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
 	/* display one conn per line on subsequent lines */
 	timeout = 0;
 	if (!list_empty(&conn->timeout.link))
-		timeout = (signed long) conn->timeout.timo_jif -
-			(signed long) jiffies;
+		timeout = (long) conn->timeout.timo_jif -
+			(long) jiffies;
 
 	seq_printf(m,
 		   "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",

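Editor's note: both rxrpc hunks drop the redundant `signed` keyword -- `long` is already signed, and the subtraction is done in signed arithmetic on purpose so that an already-expired timer shows up as a negative remaining time even across jiffies wraparound. A sketch of the computation (names are illustrative):

	/* Time left until 'expiry', in jiffies; negative if it has passed.
	 * Signed subtraction keeps the result meaningful across wraparound. */
	static long jiffies_remaining(unsigned long expiry, unsigned long now)
	{
		return (long)expiry - (long)now;
	}
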
+ 3 - 4
net/sched/sch_prio.c

@@ -228,14 +228,13 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
 	}
 	sch_tree_unlock(sch);
 
-	for (i=0; i<=TC_PRIO_MAX; i++) {
-		int band = q->prio2band[i];
-		if (q->queues[band] == &noop_qdisc) {
+	for (i=0; i<q->bands; i++) {
+		if (q->queues[i] == &noop_qdisc) {
 			struct Qdisc *child;
 			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
 			if (child) {
 				sch_tree_lock(sch);
-				child = xchg(&q->queues[band], child);
+				child = xchg(&q->queues[i], child);
 
 				if (child != &noop_qdisc)
 					qdisc_destroy(child);

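Editor's note: the prio_tune() change above iterates over the configured bands instead of the priority-to-band map, so a band that no priority happens to reference still gets a real pfifo child instead of being left on noop_qdisc. A small stand-alone illustration of the difference, using plain ints as simplified stand-ins for the qdisc structures:

	#include <stdio.h>

	int main(void)
	{
		/* 4 bands, but the priority map only ever points at bands 0 and 1. */
		int bands = 4;
		int prio2band[16] = { 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 };
		int visited[4] = { 0, 0, 0, 0 };
		int i;

		for (i = 0; i <= 15; i++)		/* old loop: follow the map */
			visited[prio2band[i]] = 1;

		for (i = 0; i < bands; i++)		/* new loop: every band */
			if (!visited[i])
				printf("band %d would have been left on noop_qdisc\n", i);

		return 0;
	}
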
+ 4 - 0
net/sched/sch_sfq.c

@@ -144,6 +144,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
 		    (iph->protocol == IPPROTO_TCP ||
 		     iph->protocol == IPPROTO_UDP ||
+		     iph->protocol == IPPROTO_SCTP ||
+		     iph->protocol == IPPROTO_DCCP ||
 		     iph->protocol == IPPROTO_ESP))
 			h2 ^= *(((u32*)iph) + iph->ihl);
 		break;
@@ -155,6 +157,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 		h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
 		if (iph->nexthdr == IPPROTO_TCP ||
 		    iph->nexthdr == IPPROTO_UDP ||
+		    iph->nexthdr == IPPROTO_SCTP ||
+		    iph->nexthdr == IPPROTO_DCCP ||
 		    iph->nexthdr == IPPROTO_ESP)
 			h2 ^= *(u32*)&iph[1];
 		break;

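Editor's note: the sch_sfq hunks add SCTP and DCCP to the transports whose port pair is folded into the flow hash -- for an unfragmented IPv4 packet, the 32-bit word immediately after the IP header is the source/destination port pair for all of these protocols (or the SPI for ESP). A sketch of that fold, assuming a validated header already in host memory (names are illustrative):

	#include <stdint.h>

	/* Derive the two hash inputs sfq_hash() uses for IPv4: destination
	 * address, then source address xor protocol xor the first transport
	 * word.  'ihl' is the IP header length in 32-bit words. */
	static void sfq_ipv4_keys(const uint32_t *iph_words, unsigned int ihl,
				  uint8_t protocol, uint32_t *h1, uint32_t *h2)
	{
		*h1 = iph_words[4];			/* destination address */
		*h2 = iph_words[3] ^ protocol;		/* source address ^ protocol */
		*h2 ^= iph_words[ihl];			/* port pair (or ESP SPI) word */
	}
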
+ 2 - 1
sound/sparc/cs4231.c

@@ -69,13 +69,14 @@ struct sbus_dma_info {
 };
 #endif
 
+struct snd_cs4231;
 struct cs4231_dma_control {
         void		(*prepare)(struct cs4231_dma_control *dma_cont, int dir);
         void		(*enable)(struct cs4231_dma_control *dma_cont, int on);
         int		(*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len);
         unsigned int	(*address)(struct cs4231_dma_control *dma_cont);
         void		(*reset)(struct snd_cs4231 *chip); 
-        void		(*preallocate)(struct snd_cs4231 *chip, struct snd_snd_pcm *pcm); 
+        void		(*preallocate)(struct snd_cs4231 *chip, struct snd_pcm *pcm); 
 #ifdef EBUS_SUPPORT
 	struct		ebus_dma_info	ebus_info;
 #endif
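
Editor's note: the one-line addition of `struct snd_cs4231;` is a forward declaration. The ops structure only stores pointers to the chip structure, so an incomplete type is enough; without the declaration, the tag's first appearance inside a function-pointer parameter list would create a prototype-scoped struct. The same pattern in isolation, with made-up names:

	/* Forward declaration: only pointers to struct widget appear below, so
	 * an incomplete type suffices. */
	struct widget;

	struct widget_ops {
		void (*reset)(struct widget *w);
		int  (*configure)(struct widget *w, int mode);
	};

	struct widget {
		int			state;
		const struct widget_ops	*ops;
	};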